diff --git a/.changes/2.10.21.json b/.changes/2.10.21.json index 440fa04fbf2f..b4c55737c270 100644 --- a/.changes/2.10.21.json +++ b/.changes/2.10.21.json @@ -86,6 +86,11 @@ "type": "feature", "category": "AWS CodeCommit", "description": "This release adds support for creating pull request approval rules and pull request approval rule templates in AWS CodeCommit. This allows developers to block merges of pull requests, contingent on the approval rules being satisfiied." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Upgrades to Instance Metadata Service version 2 (IMDS v2). With IMDS v2, a session token is used to make requests for EC2 instance metadata and credentials." } ] -} \ No newline at end of file +} diff --git a/.changes/2.15.10.json b/.changes/2.15.10.json new file mode 100644 index 000000000000..ad3529cf9916 --- /dev/null +++ b/.changes/2.15.10.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.10", + "date": "2020-10-19", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "An Admin can now update the launch role associated with a Provisioned Product. Admins and End Users can now view the launch role associated with a Provisioned Product." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This Patch Manager release now supports Common Vulnerabilities and Exposure (CVE) Ids for missing packages via the DescribeInstancePatches API." + }, + { + "type": "feature", + "category": "HTTP Client SPI", + "description": "Calling the SdkHttpFullRequest uri() builder method, query parameters of the provided URI will be kept.\nThis can be useful in case you want to provide an already fully formed URI like a callback URI." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Amazon CloudFront adds support for Origin Shield." + }, + { + "type": "feature", + "category": "Amazon DocumentDB with MongoDB compatibility", + "description": "Documentation updates for docdb" + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "Documentation updates for Cryo" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.11.json b/.changes/2.15.11.json new file mode 100644 index 000000000000..5f5b3016ea01 --- /dev/null +++ b/.changes/2.15.11.json @@ -0,0 +1,46 @@ +{ + "version": "2.15.11", + "date": "2020-10-20", + "entries": [ + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Fixed an issue where metrics were not being collected for Amazon S3 (or other XML services)" + }, + { + "type": "feature", + "category": "AWS Batch", + "description": "Adding evaluateOnExit to job retry strategies." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where requestBody and asyncRequestBody were not visible in ExecutionInterceptor.afterMarshalling." + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "description": "EnvironmentStatus enum update to include Aborting, LinkingFrom and LinkingTo" + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "Documentation updates to AppSync to correct several typos." + }, + { + "type": "feature", + "category": "Amazon S3", + "description": "Moved the logic for calculating the Content-MD5 checksums from s3 to sdk-core. As always, make sure to use a version of 'sdk-core' greater than or equal to your version of 's3'. 
If you use an old version of 'sdk-core' and a new version of 's3', you will receive errors that Content-MD5 is required." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Fix for handling special characters in attribute names with WRITE_IF_NOT_EXISTS update behavior" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.12.json b/.changes/2.15.12.json new file mode 100644 index 000000000000..dd77de617bf4 --- /dev/null +++ b/.changes/2.15.12.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.12", + "date": "2020-10-21", + "entries": [ + { + "type": "feature", + "category": "AWS Global Accelerator", + "description": "This release adds support for specifying port overrides on AWS Global Accelerator endpoint groups." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "AWS Organizations renamed the 'master account' to 'management account'." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "This release adds custom data sources: a new data source type that gives you full control of the documents added, modified or deleted during a data source sync while providing run history metrics." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue crawlers now support incremental crawls for the Amazon Simple Storage Service (Amazon S3) data source." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "instance-storage-info nvmeSupport added to DescribeInstanceTypes API" + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "CloudFront adds support for managing the public keys for signed URLs and signed cookies directly in CloudFront (it no longer requires the AWS root account)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.13.json b/.changes/2.15.13.json new file mode 100644 index 000000000000..293eccb6f4cb --- /dev/null +++ b/.changes/2.15.13.json @@ -0,0 +1,31 @@ +{ + "version": "2.15.13", + "date": "2020-10-22", + "entries": [ + { + "type": "feature", + "category": "Amazon Appflow", + "description": "Salesforce connector creation with customer provided client id and client secret, incremental pull configuration, salesforce upsert write operations and execution ID when on-demand flows are executed." + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "description": "SNS now supports a new class of topics: FIFO (First-In-First-Out). FIFO topics provide strictly-ordered, deduplicated, filterable, encryptable, many-to-many messaging at scale." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "API Documentation updates for IAM Access Analyzer." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Documentation updates for servicecatalog" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.14.json b/.changes/2.15.14.json new file mode 100644 index 000000000000..1fcb96571caa --- /dev/null +++ b/.changes/2.15.14.json @@ -0,0 +1,21 @@ +{ + "version": "2.15.14", + "date": "2020-10-23", + "entries": [ + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Support description on columns." + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "MediaTailor now supports ad marker passthrough for HLS. Use AdMarkerPassthrough to pass EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 from origin manifests into personalized manifests." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.15.json b/.changes/2.15.15.json new file mode 100644 index 000000000000..2d738bc63a8a --- /dev/null +++ b/.changes/2.15.15.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.15", + "date": "2020-10-26", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release enables customers to bring custom images for use with SageMaker Studio notebooks." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "Amazon Kendra now supports indexing data from Confluence Server." + }, + { + "type": "feature", + "category": "Amazon Neptune", + "contributor": "", + "description": "This feature enables custom endpoints for Amazon Neptune clusters. Custom endpoints simplify connection management when clusters contain instances with different capacities and configuration settings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.16.json b/.changes/2.15.16.json new file mode 100644 index 000000000000..11bf666c2268 --- /dev/null +++ b/.changes/2.15.16.json @@ -0,0 +1,18 @@ +{ + "version": "2.15.16", + "date": "2020-10-27", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue machine learning transforms now support encryption-at-rest for labels and trained models." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.17.json b/.changes/2.15.17.json new file mode 100644 index 000000000000..bd993a2fa2ee --- /dev/null +++ b/.changes/2.15.17.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.17", + "date": "2020-10-28", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "This release adds support for GG-Managed Job Namespace" + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "contributor": "", + "description": "Documentation update for Amazon WorkMail" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": null, + "description": "Fixed an issue where marshalling of a modeled object was not honoring the has* method on a list/map." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "AWS Nitro Enclaves general availability. Added support to RunInstances for creating enclave-enabled EC2 instances. New APIs to associate an ACM certificate with an IAM role, for enclave consumption." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": null, + "description": "Fixed an issue where the toString/equals/hashCode on a modeled object were not honoring the has* methods for lists and maps." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.18.json b/.changes/2.15.18.json new file mode 100644 index 000000000000..8cbbc17e81db --- /dev/null +++ b/.changes/2.15.18.json @@ -0,0 +1,48 @@ +{ + "version": "2.15.18", + "date": "2020-10-29", + "entries": [ + { + "type": "feature", + "category": "AWS Marketplace Commerce Analytics", + "contributor": "", + "description": "Documentation updates for marketplacecommerceanalytics to specify four data sets which are deprecated." + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "contributor": "", + "description": "This release enables customers to manage their own contact lists and end-user subscription preferences." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Application Load Balancer (ALB) now supports the gRPC protocol-version. With this release, customers can use ALB to route and load balance gRPC traffic between gRPC enabled clients and microservices." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "contributor": "", + "description": "Adding support for access based enumeration on SMB file shares, file share visibility on SMB file shares, and file upload notifications for all file shares" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Support for Appliance mode on Transit Gateway that simplifies deployment of stateful network appliances. Added support for AWS Client VPN Self-Service Portal." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "Support disabling the default execute-api endpoint for REST APIs." + }, + { + "type": "feature", + "category": "CodeArtifact", + "contributor": "", + "description": "Add support for tagging of CodeArtifact domain and repository resources." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.19.json b/.changes/2.15.19.json new file mode 100644 index 000000000000..16c9e44bb4c6 --- /dev/null +++ b/.changes/2.15.19.json @@ -0,0 +1,54 @@ +{ + "version": "2.15.19", + "date": "2020-10-30", + "entries": [ + { + "type": "feature", + "category": "Braket", + "contributor": "", + "description": "This release supports tagging for Amazon Braket quantum-task resources. It also supports tag-based access control for quantum-task APIs." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixing FilteringSubscriber and LimitingSubscriber to complete when subscribing criteria is completed." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Adding DocDbSettings to support DocumentDB as a source." 
+ }, + { + "type": "feature", + "category": "EC2 Image Builder", + "contributor": "", + "description": "This feature increases the number of accounts that can be added to the Launch permissions within an Image Builder Distribution configuration." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Documentation updates for AWS ElastiCache" + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "contributor": "", + "description": "Documentation updates for Amazon SNS" + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "This release of the Amazon Macie API adds an eqExactMatch operator for filtering findings. With this operator you can increase the precision of your finding filters and suppression rules." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "Support for HLS discontinuity tags in the child manifests. Support for incomplete segment behavior in the media output. Support for automatic input failover condition settings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.20.json b/.changes/2.15.20.json new file mode 100644 index 000000000000..01a97f5581b6 --- /dev/null +++ b/.changes/2.15.20.json @@ -0,0 +1,48 @@ +{ + "version": "2.15.20", + "date": "2020-11-02", + "entries": [ + { + "type": "bugfix", + "category": "AWS DynamoDB Enhanced Client", + "contributor": "", + "description": "Publisher streams returned by async resources in the DynamoDB Enhanced Client now correctly handle mapping errors when they are encountered in the stream by calling onError on the subscriber and then implicitly cancelling the subscription. Previously the stream would just permanently hang and never complete." + }, + { + "type": "deprecation", + "category": "AWS SSO OIDC", + "contributor": "", + "description": "Renamed/deprecated 'error_description' fields in exceptions in favor of 'errorDescription'." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Added code generation validation that customer-visible identifiers are idiomatic (do not contain underscores). Services with underscores in their models can use rename customizations to fix these issues, or apply the 'underscoresInNameBehavior = ALLOW' customization." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Upgrade `org.apache.httpcomponents:httpclient` version to `4.5.13`" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": null, + "description": "Fixing race condition in EventStreamAsyncResponseTransformer. Field eventsToDeliver is a LinkedList, i.e., not thread-safe. Accesses to field eventsToDeliver are protected by synchronization on itself, but not in 1 location." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "The mapped publisher returned by SdkPublisher.map will now handle exceptions thrown by the mapping function by calling onError on its subscriber and then cancelling the subscription rather than throwing it back to the publishing process when it attempts to publish data." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for the following features: 1. P4d instances based on NVIDIA A100 GPUs. 2. 
NetworkCardIndex attribute to support multiple network cards." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.21.json b/.changes/2.15.21.json new file mode 100644 index 000000000000..6251577513f5 --- /dev/null +++ b/.changes/2.15.21.json @@ -0,0 +1,72 @@ +{ + "version": "2.15.21", + "date": "2020-11-04", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Service Catalog API ListPortfolioAccess can now support a maximum PageSize of 100." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "contributor": "", + "description": "Amazon Elasticsearch Service now supports native SAML authentication that seamlessly integrates with the customers' existing SAML 2.0 Identity Provider (IdP)." + }, + { + "type": "feature", + "category": "AWSMarketplace Metering", + "contributor": "", + "description": "Adding Vendor Tagging Support in MeterUsage and BatchMeterUsage API." + }, + { + "type": "feature", + "category": "AmazonMQ", + "contributor": "", + "description": "Amazon MQ introduces support for RabbitMQ, a popular message-broker with native support for AMQP 0.9.1. You can now create fully-managed RabbitMQ brokers in the cloud." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Capacity Rebalance helps you manage and maintain workload availability during Spot interruptions by proactively augmenting your Auto Scaling group with a new instance before interrupting an old one." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Added support for Client Connect Handler for AWS Client VPN. Fleet supports launching replacement instances in response to Capacity Rebalance recommendation." + }, + { + "type": "feature", + "category": "AWS X-Ray", + "contributor": "", + "description": "Releasing new APIs GetInsightSummaries, GetInsightEvents, GetInsight, GetInsightImpactGraph and updating GetTimeSeriesServiceStatistics API for AWS X-Ray Insights feature" + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Documentation updates for monitoring" + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "contributor": "", + "description": "With this release, Amazon Transcribe now supports real-time transcription from audio sources in Italian (it-IT) and German(de-DE)." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "Updated API documentation and added paginator for AWS Iot Registry ListThingPrincipals API." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.22.json b/.changes/2.15.22.json new file mode 100644 index 000000000000..ca04f429f1d0 --- /dev/null +++ b/.changes/2.15.22.json @@ -0,0 +1,72 @@ +{ + "version": "2.15.22", + "date": "2020-11-05", + "entries": [ + { + "type": "feature", + "category": "Amazon Fraud Detector", + "contributor": "", + "description": "Added support for deleting resources like Variables, ExternalModels, Outcomes, Models, ModelVersions, Labels, EventTypes and EntityTypes. Updated DeleteEvent operation to catch missing exceptions." 
+ }, + { + "type": "feature", + "category": "AWS App Mesh", + "contributor": "", + "description": "This release adds circuit breaking capabilities to your mesh with connection pooling and outlier detection support." + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "This release adds a new ReplicaStatus INACCESSIBLE_ENCRYPTION_CREDENTIALS for the Table description, indicating when a key used to encrypt a regional replica table is not accessible." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "contributor": "", + "description": "With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "contributor": "", + "description": "Amazon Elasticsearch Service now provides the ability to define a custom endpoint for your domain and link an SSL certificate from ACM, making it easier to refer to Kibana and the domain endpoint." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Supports a new parameter to set the max allocated storage in gigabytes for the CreateDBInstanceReadReplica API." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Support Amazon MQ as an Event Source." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Documentation updates for EC2." + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "contributor": "", + "description": "Amazon Kendra now supports providing user context in your query requests, Tokens can be JSON or JWT format. This release also introduces support for Confluence cloud datasources." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.23.json b/.changes/2.15.23.json new file mode 100644 index 000000000000..403b6fb79a9d --- /dev/null +++ b/.changes/2.15.23.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.23", + "date": "2020-11-06", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "Remove the CreatePresignedPortalUrl API" + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "Support for SCTE35 ad markers in OnCuePoint style in RTMP outputs." + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "contributor": "", + "description": "Amazon Data Lifecycle Manager now supports the creation and retention of EBS-backed Amazon Machine Images" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Documentation updates for Systems Manager" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Network card support with four new attributes: NetworkCardIndex, NetworkPerformance, DefaultNetworkCardIndex, and MaximumNetworkInterfaces, added to the DescribeInstanceTypes API." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.24.json b/.changes/2.15.24.json new file mode 100644 index 000000000000..47b0cda8e15c --- /dev/null +++ b/.changes/2.15.24.json @@ -0,0 +1,66 @@ +{ + "version": "2.15.24", + "date": "2020-11-09", + "entries": [ + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "contributor": "", + "description": "Adding support for package versioning in Amazon Elasticsearch Service" + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "contributor": "", + "description": "Added bandwidth rate limit schedule for Tape and Volume Gateways" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "This release adds supports for exporting Amazon DynamoDB table data to Amazon S3 to perform analytics at any scale." + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "DataSync now enables customers to adjust the network bandwidth used by a running AWS DataSync task." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "S3 Intelligent-Tiering adds support for Archive and Deep Archive Access tiers; S3 Replication adds replication metrics and failure notifications, brings feature parity for delete marker replication" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "This release adds support for creating DNS aliases for Amazon FSx for Windows File Server, and using AWS Backup to automate scheduled, policy-driven backup plans for Amazon FSx file systems." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "add a new filter to allow customer to filter automation executions by using resource-group which used for execute automation" + }, + { + "type": "feature", + "category": "AWS IoT Analytics", + "contributor": "", + "description": "AWS IoT Analytics now supports Late Data Notifications for datasets, dataset content creation using previous version IDs, and includes the LastMessageArrivalTime attribute for channels and datastores." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release provides native support for specifying Amazon FSx for Windows File Server file systems as volumes in your Amazon ECS task definitions." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "Sensitive data findings in Amazon Macie now include enhanced location data for Apache Avro object containers and Apache Parquet files." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.25.json b/.changes/2.15.25.json new file mode 100644 index 000000000000..5dbb8443dabe --- /dev/null +++ b/.changes/2.15.25.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.25", + "date": "2020-11-10", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for Gateway Load Balancer VPC endpoints and VPC endpoint services" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Add SessionId as a filter for DescribeSessions API" + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Added support for Gateway Load Balancers, which make it easy to deploy, scale, and run third-party virtual networking appliances." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Replaced class loading from Thread.currentThread().getContextClassLoader() to ClassLoaderHelper in ProfileCredentialsUtils and WebIdentityCredentialsUtils, since it was causing Class not found error." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "@frosforever", + "description": "Fix default client error to have spaces between words." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.26.json b/.changes/2.15.26.json new file mode 100644 index 000000000000..57558403b69d --- /dev/null +++ b/.changes/2.15.26.json @@ -0,0 +1,54 @@ +{ + "version": "2.15.26", + "date": "2020-11-11", + "entries": [ + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Adding support to remove a Provisioned Product launch role via UpdateProvisionedProductProperties" + }, + { + "type": "feature", + "category": "Netty NIO HTTP Client", + "contributor": "", + "description": "Upgrade Netty libraries to `4.1.53.Final`, and `netty-tcnative-boringssl-static` to `2.0.34.Final`." + }, + { + "type": "feature", + "category": "AWS Glue DataBrew", + "contributor": "", + "description": "This is the initial SDK release for AWS Glue DataBrew. DataBrew is a visual data preparation tool that enables users to clean and normalize data without writing any code." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "AWS Elemental MediaConvert SDK has added support for Automated ABR encoding and improved the reliability of embedded captions in accelerated outputs." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "contributor": "", + "description": "Fix a bug where the Netty HTTP client can leak memory when a response stream is cancelled prematurely but the upstream publisher continues to invoke onNext for some time before stopping. Fixes [#2051](https://github.com/aws/aws-sdk-java-v2/issues/2051)." + }, + { + "type": "feature", + "category": "AWS Amplify", + "contributor": "", + "description": "Whereas previously custom headers were set via the app's buildspec, custom headers can now be set directly on the Amplify app for both ci/cd and manual deploy apps." 
+ }, + { + "type": "feature", + "category": "Amazon Forecast Service", + "contributor": "", + "description": "Providing support of custom quantiles in CreatePredictor API." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "QuickSight now supports Column-level security and connecting to Oracle data source." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.27.json b/.changes/2.15.27.json new file mode 100644 index 000000000000..cc9e30fd7cbf --- /dev/null +++ b/.changes/2.15.27.json @@ -0,0 +1,48 @@ +{ + "version": "2.15.27", + "date": "2020-11-12", + "entries": [ + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "This release adds a batchMode parameter to the IotEvents, IotAnalytics, and Firehose actions which allows customers to send an array of messages to the corresponding services" + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "contributor": "", + "description": "Adds support to use dynamic filters with Personalize." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Lex now supports es-ES, it-IT, fr-FR and fr-CA locales" + }, + { + "type": "feature", + "category": "AWS RoboMaker", + "contributor": "", + "description": "This release introduces Robomaker Worldforge TagsOnCreate which allows customers to tag worlds as they are being generated by providing the tags while configuring a world generation job." + }, + { + "type": "feature", + "category": "AWS Service Catalog App Registry", + "contributor": "", + "description": "AWS Service Catalog AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "contributor": "", + "description": "This release adds support for Amazon Lightsail container services. You can now create a Lightsail container service, and deploy Docker images to it." + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds new Australian English female voice - Olivia. Olivia is available as Neural voice only." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.28.json b/.changes/2.15.28.json new file mode 100644 index 000000000000..324074327277 --- /dev/null +++ b/.changes/2.15.28.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.28", + "date": "2020-11-13", + "entries": [ + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Adds dualstack support for Network Load Balancers (TCP/TLS only), an attribute for WAF fail open for Application Load Balancers, and an attribute for connection draining for Network Load Balancers." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Shield", + "contributor": "", + "description": "This release adds APIs for two new features: 1) Allow customers to bundle resources into protection groups and treat as a single unit. 2) Provide per-account event summaries to all AWS customers." 
+ }, + { + "type": "feature", + "category": "Amazon Textract", + "contributor": "", + "description": "AWS Textract now allows customers to specify their own KMS key to be used for asynchronous jobs output results, AWS Textract now also recognizes handwritten text from English documents." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.29.json b/.changes/2.15.29.json new file mode 100644 index 000000000000..2e5641407367 --- /dev/null +++ b/.changes/2.15.29.json @@ -0,0 +1,60 @@ +{ + "version": "2.15.29", + "date": "2020-11-16", + "entries": [ + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Adding MoveReplicationTask feature to move replication tasks between instances" + }, + { + "type": "feature", + "category": "AWS IoT Secure Tunneling", + "contributor": "", + "description": "Support using multiple data streams per tunnel using the Secure Tunneling multiplexing feature." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "AWS Synthetics now supports Environment Variables to assign runtime parameters in the canary scripts." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This feature enables customers to encrypt their Amazon SageMaker Studio storage volumes with customer master keys (CMKs) managed by them in AWS Key Management Service (KMS)." + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "This release supports Unicode characters for string operations in formulae computes in SiteWise. For more information, search for SiteWise in Amazon What's new or refer the SiteWise documentation." + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "contributor": "", + "description": "We show details about inbound executions and id of action executions in GetPipelineState API. We also add ConflictException to StartPipelineExecution, RetryStageExecution, StopPipelineExecution APIs." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Support import of CloudFormation stacks into Service Catalog provisioned products." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Adding new parameters for dashboard persistence" + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "contributor": "", + "description": "Documentation updates for Amazon SNS." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.30.json b/.changes/2.15.30.json new file mode 100644 index 000000000000..9bcbb635553e --- /dev/null +++ b/.changes/2.15.30.json @@ -0,0 +1,42 @@ +{ + "version": "2.15.30", + "date": "2020-11-17", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Support copy-db-snapshot in the one region on cross clusters and local cluster for RDSonVmware. Add target-custom-availability-zone parameter to specify where a snapshot should be copied." + }, + { + "type": "feature", + "category": "Firewall Management Service", + "contributor": "", + "description": "Added Firewall Manager policy support for AWS Network Firewall resources." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds support for user hierarchy group and user hierarchy structure. 
For details, see the Release Notes in the Amazon Connect Administrator Guide." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "The Amazon Macie API now has a lastRunErrorStatus property to indicate if account- or bucket-level errors occurred during the run of a one-time classification job or the latest run of a recurring job." + }, + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "(New Service) AWS Network Firewall is a managed network layer firewall service that makes it easy to secure your virtual private cloud (VPC) networks and block malicious traffic." + }, + { + "type": "feature", + "category": "Amazon Chime", + "contributor": "", + "description": "This release adds CRUD APIs for Amazon Chime SipMediaApplications and SipRules. It also adds the API for creating outbound PSTN calls for Amazon Chime meetings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.31.json b/.changes/2.15.31.json new file mode 100644 index 000000000000..de3ca883f608 --- /dev/null +++ b/.changes/2.15.31.json @@ -0,0 +1,54 @@ +{ + "version": "2.15.31", + "date": "2020-11-18", + "entries": [ + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "AWS S3 Storage Lens provides visibility into your storage usage and activity trends at the organization or account level, with aggregations by Region, storage class, bucket, and prefix." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Adding Memcached 1.6 to parameter family" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Outposts", + "contributor": "", + "description": "Mark the Name parameter in CreateOutpost as required." + }, + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "AWS Backup now supports cross-account backup, enabling AWS customers to securely copy their backups across their AWS accounts within their AWS organizations." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "EC2 Fleet adds support of DeleteFleets API for instant type fleets. Now you can delete an instant type fleet and terminate all associated instances with a single API call." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "contributor": "", + "description": "AWS CodeBuild - Adding Status field for Report Group" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This release adds ChangeSets support for Nested Stacks. ChangeSets offer a preview of how proposed changes to a stack might impact existing resources or create new ones." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.32.json b/.changes/2.15.32.json new file mode 100644 index 000000000000..1659d30f59fd --- /dev/null +++ b/.changes/2.15.32.json @@ -0,0 +1,84 @@ +{ + "version": "2.15.32", + "date": "2020-11-19", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "You can now create Auto Scaling groups with multiple launch templates using a mixed instances policy, making it easy to deploy an AMI with an architecture that is different from the rest of the group." 
+ }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Amazon Lex supports managing input and output contexts as well as default values for slots." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "The AWS Elemental MediaLive APIs and SDKs now support the ability to see the software update status on Link devices" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "contributor": "", + "description": "Amazon Redshift support for returning ClusterNamespaceArn in describeClusters" + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management." + }, + { + "type": "feature", + "category": "AWS Directory Service", + "contributor": "", + "description": "Adding multi-region replication feature for AWS Managed Microsoft AD" + }, + { + "type": "feature", + "category": "Amazon Kinesis Analytics", + "contributor": "", + "description": "Amazon Kinesis Data Analytics now supports building and running streaming applications using Apache Flink 1.11 and provides a way to access the Apache Flink dashboard for supported Flink versions." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Added the starting position and starting position timestamp to ESM Configuration. Now customers will be able to view these fields for their ESM." + }, + { + "type": "feature", + "category": "Amazon Lex Runtime Service", + "contributor": "", + "description": "Amazon Lex now supports the ability to view and manage active contexts associated with a user session." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "Additional metadata that may be applicable to the recommendation." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "contributor": "", + "description": "EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Adding support for Glue Schema Registry. The AWS Glue Schema Registry is a new feature that allows you to centrally discover, control, and evolve data stream schemas." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.33.json b/.changes/2.15.33.json new file mode 100644 index 000000000000..19d7726cfee1 --- /dev/null +++ b/.changes/2.15.33.json @@ -0,0 +1,78 @@ +{ + "version": "2.15.33", + "date": "2020-11-20", + "entries": [ + { + "type": "feature", + "category": "AWS CloudHSM V2", + "contributor": "", + "description": "Added managed backup retention, a feature that enables customers to retain backups for a configurable period after which CloudHSM service will automatically delete them." 
+ }, + { + "type": "feature", + "category": "Amazon Cognito Identity", + "contributor": "", + "description": "Added SDK pagination support for ListIdentityPools" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "contributor": "", + "description": "This release adds support for PER TOPIC PER PARTITION monitoring on AWS MSK clusters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds a set of Amazon Connect APIs to programmatically control instance creation, modification, description and deletion." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "contributor": "", + "description": "The Amazon Macie API now provides S3 bucket metadata that indicates whether any one-time or recurring classification jobs are configured to analyze data in a bucket." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Add new documentation regarding automatically generated Content-MD5 headers when using the SDK or CLI." + }, + { + "type": "feature", + "category": "Amazon Chime", + "contributor": "", + "description": "The Amazon Chime SDK for messaging provides the building blocks needed to build chat and other real-time collaboration features." + }, + { + "type": "feature", + "category": "AWS Service Catalog App Registry", + "contributor": "", + "description": "AWS Service Catalog AppRegistry Documentation update" + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "contributor": "", + "description": "This release supports tagging repository association resources in Amazon CodeGuru Reviewer." + }, + { + "type": "feature", + "category": "AWS Single Sign-on", + "contributor": "", + "description": "Added support for retrieving SSO credentials: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "contributor": "", + "description": "This release makes tag value a required attribute of the tag's key-value pair." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.34.json b/.changes/2.15.34.json new file mode 100644 index 000000000000..bf6631bf63c0 --- /dev/null +++ b/.changes/2.15.34.json @@ -0,0 +1,138 @@ +{ + "version": "2.15.34", + "date": "2020-11-23", + "entries": [ + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "With this release, you can capture data changes in any Amazon DynamoDB table as an Amazon Kinesis data stream. You also can use PartiQL (SQL-compatible language) to manipulate data in DynamoDB tables." + }, + { + "type": "feature", + "category": "AWS Outposts", + "contributor": "", + "description": "Support specifying tags during the creation of the Outpost resource. Tags are now returned in the response body of Outpost APIs." + }, + { + "type": "feature", + "category": "AWS Single Sign-On Admin", + "contributor": "", + "description": "AWS Single Sign-On now enables attribute-based access control for workforce identities to simplify permissions in AWS" + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Feature1 - Glue crawler adds data lineage configuration option. 
Feature2 - AWS Glue Data Catalog adds APIs for PartitionIndex creation and deletion as part of Enhancement Partition Management feature." + }, + { + "type": "feature", + "category": "AWS IoT", + "contributor": "", + "description": "This release enables users to identify different file types in the over-the-air update (OTA) functionality using fileType parameter for CreateOTAUpdate API" + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "contributor": "", + "description": "Add API support for EMR Studio, a new notebook-first IDE for data scientists and data engineers with single sign-on, Jupyter notebooks, automated infrastructure provisioning, and job diagnosis." + }, + { + "type": "feature", + "category": "Amazon Translate", + "contributor": "", + "description": "This update adds new operations to create and manage parallel data in Amazon Translate. Parallel data is a resource that you can use to run Active Custom Translation jobs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release adds support for updating capacity providers, specifying custom instance warmup periods for capacity providers, and using deployment circuit breaker for your ECS Services." + }, + { + "type": "feature", + "category": "CodeArtifact", + "contributor": "", + "description": "Add support for the NuGet package format." + }, + { + "type": "feature", + "category": "AWS License Manager", + "contributor": "", + "description": "AWS License Manager now provides the ability for license administrators to be able to associate license configurations to AMIs shared with their AWS account" + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs." + }, + { + "type": "feature", + "category": "AWS CodeStar connections", + "contributor": "", + "description": "Added support for the UpdateHost API." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "contributor": "", + "description": "Adding MAINTENANCE and REBOOTING_BROKER to Cluster states." + }, + { + "type": "feature", + "category": "Amazon Timestream Query", + "contributor": "", + "description": "Amazon Timestream now supports \"QueryStatus\" in Query API which has information about cumulative bytes scanned, metered, as well as progress percentage for the query." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Updated the account management API to support the integration with AWS Organizations. Added new methods to allow users to view and manage the delegated administrator account for Security Hub." + }, + { + "type": "feature", + "category": "Amazon Forecast Service", + "contributor": "", + "description": "Releasing the set of PredictorBacktestExportJob APIs which allow customers to export backtest values and item-level metrics data from Predictor training." 
+ }, + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "contributor": "", + "description": "Add Detected Workload to ApplicationComponent which shows the workloads that installed in the component" + }, + { + "type": "feature", + "category": "AWS Signer", + "contributor": "", + "description": "AWS Signer is launching code-signing for AWS Lambda. Now customers can cryptographically sign Lambda code to ensure trust, integrity, and functionality." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "This release includes support for new feature: Code Signing for AWS Lambda. This adds new resources and APIs to configure Lambda functions to accept and verify signed code artifacts at deployment." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for Multiple Private DNS names to DescribeVpcEndpointServices response." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.35.json b/.changes/2.15.35.json new file mode 100644 index 000000000000..27df17571e74 --- /dev/null +++ b/.changes/2.15.35.json @@ -0,0 +1,120 @@ +{ + "version": "2.15.35", + "date": "2020-11-24", + "entries": [ + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Support for embedding without user registration. New enum EmbeddingIdentityType. A potential breaking change. Affects code that refers IdentityType enum type directly instead of literal string value." + }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "Add Ec2Configuration in ComputeEnvironment.ComputeResources. Use in CreateComputeEnvironment API to enable AmazonLinux2 support." + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "CloudTrail now includes advanced event selectors, which give you finer-grained control over the events that are logged to your trail." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "AWS Elemental MediaConvert SDK has added support for Vorbis and Opus audio in OGG/OGA containers." + }, + { + "type": "feature", + "category": "Amazon GameLift", + "contributor": "", + "description": "GameLift FlexMatch is now available as a standalone matchmaking solution. FlexMatch now provides customizable matchmaking for games hosted peer-to-peer, on-premises, or on cloud compute primitives." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "contributor": "", + "description": "Adding GetReportGroupTrend API for Test Reports." + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "contributor": "", + "description": "Support Comprehend events detection APIs" + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "contributor": "", + "description": "Amazon Transcribe Medical streaming added medical specialties and HTTP/2 support. Amazon Transcribe streaming supports additional languages. Both support OGG/OPUS and FLAC codecs for streaming." + }, + { + "type": "feature", + "category": "Amazon Appflow", + "contributor": "", + "description": "Upsolver as a destination connector and documentation update." 
+ }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "This release adds ability to configure Cognito User Pools with third party sms and email providers for sending notifications to users." + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "This release adds the capability to increase storage capacity of Amazon FSx for Lustre file systems, providing the flexibility to meet evolving storage needs over time." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "Adds support for the new Modules feature for CloudFormation. A module encapsulates one or more resources and their respective configurations for reuse across your organization." + }, + { + "type": "feature", + "category": "AmazonMWAA", + "contributor": "", + "description": "(New Service) Amazon MWAA is a managed service for Apache Airflow that makes it easy for data engineers and data scientists to execute data processing workflows in the cloud." + }, + { + "type": "feature", + "category": "Amazon Timestream Write", + "contributor": "", + "description": "Adds support of upserts for idempotent updates to Timestream." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "contributor": "", + "description": "Lex now supports es-419, de-DE locales" + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "contributor": "", + "description": "This release adds support for customer managed customer master key (CMK) based encryption in IoT SiteWise." + }, + { + "type": "feature", + "category": "AWS Step Functions", + "contributor": "", + "description": "This release of the AWS Step Functions SDK introduces support for Synchronous Express Workflows" + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "contributor": "", + "description": "Updates the Integer constraint of DescribeEnvironmentManagedActionHistory's MaxItems parameter to [1, 100]." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.36.json b/.changes/2.15.36.json new file mode 100644 index 000000000000..8094b61486c6 --- /dev/null +++ b/.changes/2.15.36.json @@ -0,0 +1,30 @@ +{ + "version": "2.15.36", + "date": "2020-11-30", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed a resource leak that could occur when closing the default credentials provider (or a client using the default credentials provider), when `closeable` credentials like STS or SSO were in use. Fixes [#2149](https://github.com/aws/aws-sdk-java-v2/issues/2149)." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release introduces Amazon EC2 Mac1 instances, a new Amazon EC2 instance family built on Apple Mac mini computers, powered by AWS Nitro System, and support running macOS workloads on Amazon EC2" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add LICENSE.txt and NOTICE.txt to META-INF directory of generated JARs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.37.json b/.changes/2.15.37.json new file mode 100644 index 000000000000..a6db8da9a1cc --- /dev/null +++ b/.changes/2.15.37.json @@ -0,0 +1,102 @@ +{ + "version": "2.15.37", + "date": "2020-12-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for: EBS gp3 volumes; and D3/D3en/R5b/M5zn instances powered by Intel Cascade Lake CPUs" + }, + { + "type": "feature", + "category": "AmplifyBackend", + "contributor": "", + "description": "(New Service) The Amplify Admin UI offers an accessible way to develop app backends and manage app content. We recommend that you use the Amplify Admin UI to manage the backend of your Amplify app." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds an Amazon Connect API that provides the ability to create tasks, and a set of APIs (in preview) to configure AppIntegrations associations with Amazon Connect instances." + }, + { + "type": "feature", + "category": "Amazon AppIntegrations Service", + "contributor": "", + "description": "The Amazon AppIntegrations service (in preview release) enables you to configure and reuse connections to external applications." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Connect Contact Lens", + "contributor": "", + "description": "Contact Lens for Amazon Connect analyzes conversations, both real-time and post-call. The ListRealtimeContactAnalysisSegments API returns a list of analysis segments for a real-time analysis session." + }, + { + "type": "feature", + "category": "Amazon SageMaker Feature Store Runtime", + "contributor": "", + "description": "This release adds support for Amazon SageMaker Feature Store, which makes it easy for customers to create, version, share, and manage curated data for machine learning (ML) development." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "S3 adds support for multiple-destination replication, option to sync replica modifications; S3 Bucket Keys to reduce cost of S3 SSE with AWS KMS" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon SageMaker Pipelines for ML workflows. Amazon SageMaker Feature Store, a fully managed repository for ML features." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Amazon EKS now allows you to define and manage the lifecycle for Kubernetes add-ons for your clusters. This release adds support for the AWS VPC CNI (vpc-cni)." + }, + { + "type": "feature", + "category": "Amazon DevOps Guru", + "contributor": "", + "description": "(New Service) Amazon DevOps Guru is available in public preview. 
It's a fully managed service that uses machine learning to analyze your operational solutions to help you find and troubleshoot issues." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "This release includes support for a new feature: Container images support in AWS Lambda. This adds APIs for deploying functions as container images. AWS Lambda now supports memory up to 10240MB." + }, + { + "type": "feature", + "category": "AWS Directory Service", + "contributor": "", + "description": "Adding client authentication feature for AWS AD Connector" + }, + { + "type": "feature", + "category": "Amazon Lookout for Vision", + "contributor": "", + "description": "This release introduces support for Amazon Lookout for Vision." + }, + { + "type": "feature", + "category": "Amazon Honeycode", + "contributor": "", + "description": "Introducing APIs to read and write directly from Honeycode tables. Use APIs to pull table and column metadata, then use the read and write APIs to programmatically read and write from the tables." + }, + { + "type": "feature", + "category": "Amazon Elastic Container Registry Public", + "contributor": "", + "description": "Supports Amazon Elastic Container Registry (Amazon ECR) Public, a fully managed registry that makes it easy for a developer to publicly share container software worldwide for anyone to download." + } + ] +} diff --git a/.changes/2.15.38.json b/.changes/2.15.38.json new file mode 100644 index 000000000000..28f4e9b06414 --- /dev/null +++ b/.changes/2.15.38.json @@ -0,0 +1,18 @@ +{ + "version": "2.15.38", + "date": "2020-12-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "This is the first release of Amazon Connect Customer Profiles, a unified customer profile for your Amazon Connect contact center." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.39.json b/.changes/2.15.39.json new file mode 100644 index 000000000000..d3e28bac8e74 --- /dev/null +++ b/.changes/2.15.39.json @@ -0,0 +1,36 @@ +{ + "version": "2.15.39", + "date": "2020-12-03", + "entries": [ + { + "type": "feature", + "category": "AWS License Manager", + "contributor": "", + "description": "AWS License Manager enables managed entitlements for AWS customers and Software Vendors (ISV). You can track and distribute license entitlements from AWS Marketplace and supported ISVs." + }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "This release adds support for customer to run Batch Jobs on ECS Fargate, the serverless compute engine built for containers on AWS. Customer can also propagate Job and Job Definition Tags to ECS Task." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AmplifyBackend", + "contributor": "", + "description": "Regular documentation updates." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for EBS volumes that are attached to instances." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.15.8.json b/.changes/2.15.8.json new file mode 100644 index 000000000000..61104ed6bd0a --- /dev/null +++ b/.changes/2.15.8.json @@ -0,0 +1,96 @@ +{ + "version": "2.15.8", + "date": "2020-10-15", + "entries": [ + { + "type": "bugfix", + "category": "AWS Lambda Maven Archetype", + "description": "Fixed an issue where archetype generation failed with latest maven-archetype-plugin. See [#1981](https://github.com/aws/aws-sdk-java-v2/issues/1981)" + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This SDK Release introduces new API (DetectProtectiveEquipment) for Amazon Rekognition. This release also adds ServiceQuotaExceeded exception to Amazon Rekognition IndexFaces API." + }, + { + "type": "feature", + "category": "AWS Ground Station", + "description": "Adds error message attribute to DescribeContact DataflowDetails" + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "When creating Endpoints, Replication Instances, and Replication Tasks, the feature provides you the option to specify friendly name to the resources." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "description": "Documentation updates for WorkSpaces" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This Patch Manager release now supports searching for available packages from Amazon Linux and Amazon Linux 2 via the DescribeAvailablePatches API." + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "Add CreateOrganization and DeleteOrganization API operations." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Add support for plus (+) character in profile names" + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "description": "Add support to associate VPC Security Groups at server creation." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "API Documentation updates for Glue Get-Plan API" + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "This release improves email validation for subscriptions on the SDK endpoints." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "Add new variable, lastStatusChangeDate, to DescribeDomainConfiguration API" + }, + { + "type": "feature", + "category": "AWS X-Ray", + "description": "Enhancing CreateGroup, UpdateGroup, GetGroup and GetGroups APIs to support configuring X-Ray Insights Notifications. Adding TraceLimit information into X-Ray BatchGetTraces API response." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Return tags for all resources in the output of DescribeDBInstances, DescribeDBSnapshots, DescribeDBClusters, and DescribeDBClusterSnapshots API operations." + }, + { + "type": "feature", + "category": "AWS Budgets", + "description": "This release introduces AWS Budgets Actions, allowing you to define an explicit response(or set of responses) to take when your budget exceeds it's action threshold." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "This release adds support for the ApplyArchiveRule api in IAM Access Analyzer. The ApplyArchiveRule api allows users to apply an archive rule retroactively to existing findings in an analyzer." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API adds support for pausing and resuming classification jobs. Also, sensitive data findings now include location data for up to 15 occurrences of sensitive data." + } + ] +} \ No newline at end of file diff --git a/.changes/2.15.9.json b/.changes/2.15.9.json new file mode 100644 index 000000000000..3bc1c441b5f2 --- /dev/null +++ b/.changes/2.15.9.json @@ -0,0 +1,21 @@ +{ + "version": "2.15.9", + "date": "2020-10-16", + "entries": [ + { + "type": "feature", + "category": "AWS Organizations", + "description": "Documentation updates for AWS Organizations." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "The AWS Elemental MediaLive APIs and SDKs now support the ability to transfer the ownership of MediaLive Link devices across AWS accounts." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/bugfix-AWSLambdaMavenArchetype-9037fcf.json b/.changes/next-release/bugfix-AWSLambdaMavenArchetype-9037fcf.json deleted file mode 100644 index 2469ca1874ae..000000000000 --- a/.changes/next-release/bugfix-AWSLambdaMavenArchetype-9037fcf.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "category": "AWS Lambda Maven Archetype", - "type": "bugfix", - "description": "Fixed an issue where archetype generation failed with latest maven-archetype-plugin. See [#1981](https://github.com/aws/aws-sdk-java-v2/issues/1981)" -} diff --git a/.github/workflows/closed-issue-message.yml b/.github/workflows/closed-issue-message.yml new file mode 100644 index 000000000000..e1137cf61719 --- /dev/null +++ b/.github/workflows/closed-issue-message.yml @@ -0,0 +1,17 @@ +name: Closed Issue Message +on: + issues: + types: [closed] +jobs: + auto_comment: + runs-on: ubuntu-latest + steps: + - uses: aws-actions/closed-issue-message@v1 + with: + # These inputs are both required + repo-token: "${{ secrets.GITHUB_TOKEN }}" + message: | + ### ⚠️COMMENT VISIBILITY WARNING⚠️ + Comments on closed issues are hard for our team to see. + If you need more assistance, please open a new issue that references this one. + If you wish to keep having a conversation with other community members under this issue feel free to do so. diff --git a/CHANGELOG.md b/CHANGELOG.md index 4065d4d91a4a..28039567cc6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,1061 @@ +# __2.15.39__ __2020-12-03__ +## __AWS Batch__ + - ### Features + - This release adds support for customer to run Batch Jobs on ECS Fargate, the serverless compute engine built for containers on AWS. Customer can also propagate Job and Job Definition Tags to ECS Task. + +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for EBS volumes that are attached to instances. + +## __AWS License Manager__ + - ### Features + - AWS License Manager enables managed entitlements for AWS customers and Software Vendors (ISV). You can track and distribute license entitlements from AWS Marketplace and supported ISVs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AmplifyBackend__ + - ### Features + - Regular documentation updates. 
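As a rough sketch of the AWS Batch entry in 2.15.39 above: once a job definition has been registered with the Fargate platform capability, submitting a job looks the same as in the EC2 case. The queue and job definition names below are hypothetical placeholders, assumed to have been created beforehand.

```java
import software.amazon.awssdk.services.batch.BatchClient;
import software.amazon.awssdk.services.batch.model.SubmitJobRequest;
import software.amazon.awssdk.services.batch.model.SubmitJobResponse;

public class SubmitFargateJob {
    public static void main(String[] args) {
        try (BatchClient batch = BatchClient.create()) {
            // "fargate-queue" and "fargate-job-def" are hypothetical resources assumed to
            // exist already, with the job definition registered for the Fargate platform.
            SubmitJobResponse response = batch.submitJob(SubmitJobRequest.builder()
                    .jobName("sample-fargate-job")
                    .jobQueue("fargate-queue")
                    .jobDefinition("fargate-job-def")
                    .build());
            System.out.println("Submitted job " + response.jobId());
        }
    }
}
```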
+ +# __2.15.38__ __2020-12-01__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Connect Customer Profiles__ + - ### Features + - This is the first release of Amazon Connect Customer Profiles, a unified customer profile for your Amazon Connect contact center. + +# __2.15.37__ __2020-12-01__ +## __AWS Directory Service__ + - ### Features + - Adding client authentication feature for AWS AD Connector + +## __AWS Lambda__ + - ### Features + - This release includes support for a new feature: Container images support in AWS Lambda. This adds APIs for deploying functions as container images. AWS Lambda now supports memory up to 10240MB. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon AppIntegrations Service__ + - ### Features + - The Amazon AppIntegrations service (in preview release) enables you to configure and reuse connections to external applications. + +## __Amazon Connect Contact Lens__ + - ### Features + - Contact Lens for Amazon Connect analyzes conversations, both real-time and post-call. The ListRealtimeContactAnalysisSegments API returns a list of analysis segments for a real-time analysis session. + +## __Amazon Connect Service__ + - ### Features + - This release adds an Amazon Connect API that provides the ability to create tasks, and a set of APIs (in preview) to configure AppIntegrations associations with Amazon Connect instances. + +## __Amazon DevOps Guru__ + - ### Features + - (New Service) Amazon DevOps Guru is available in public preview. It's a fully managed service that uses machine learning to analyze your operational solutions to help you find and troubleshoot issues. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for: EBS gp3 volumes; and D3/D3en/R5b/M5zn instances powered by Intel Cascade Lake CPUs + +## __Amazon Elastic Container Registry Public__ + - ### Features + - Supports Amazon Elastic Container Registry (Amazon ECR) Public, a fully managed registry that makes it easy for a developer to publicly share container software worldwide for anyone to download. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Amazon EKS now allows you to define and manage the lifecycle for Kubernetes add-ons for your clusters. This release adds support for the AWS VPC CNI (vpc-cni). + +## __Amazon Honeycode__ + - ### Features + - Introducing APIs to read and write directly from Honeycode tables. Use APIs to pull table and column metadata, then use the read and write APIs to programmatically read and write from the tables. + +## __Amazon Lookout for Vision__ + - ### Features + - This release introduces support for Amazon Lookout for Vision. + +## __Amazon SageMaker Feature Store Runtime__ + - ### Features + - This release adds support for Amazon SageMaker Feature Store, which makes it easy for customers to create, version, share, and manage curated data for machine learning (ML) development. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker Pipelines for ML workflows. Amazon SageMaker Feature Store, a fully managed repository for ML features. + +## __Amazon Simple Storage Service__ + - ### Features + - S3 adds support for multiple-destination replication, option to sync replica modifications; S3 Bucket Keys to reduce cost of S3 SSE with AWS KMS + +## __AmplifyBackend__ + - ### Features + - (New Service) The Amplify Admin UI offers an accessible way to develop app backends and manage app content. 
We recommend that you use the Amplify Admin UI to manage the backend of your Amplify app. + +# __2.15.36__ __2020-11-30__ +## __AWS SDK for Java v2__ + - ### Features + - Add LICENSE.txt and NOTICE.txt to META-INF directory of generated JARs + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed a resource leak that could occur when closing the default credentials provider (or a client using the default credentials provider), when `closeable` credentials like STS or SSO were in use. Fixes [#2149](https://github.com/aws/aws-sdk-java-v2/issues/2149). + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release introduces Amazon EC2 Mac1 instances, a new Amazon EC2 instance family built on Apple Mac mini computers, powered by AWS Nitro System, and support running macOS workloads on Amazon EC2 + +# __2.15.35__ __2020-11-24__ +## __AWS Batch__ + - ### Features + - Add Ec2Configuration in ComputeEnvironment.ComputeResources. Use in CreateComputeEnvironment API to enable AmazonLinux2 support. + +## __AWS CloudFormation__ + - ### Features + - Adds support for the new Modules feature for CloudFormation. A module encapsulates one or more resources and their respective configurations for reuse across your organization. + +## __AWS CloudTrail__ + - ### Features + - CloudTrail now includes advanced event selectors, which give you finer-grained control over the events that are logged to your trail. + +## __AWS CodeBuild__ + - ### Features + - Adding GetReportGroupTrend API for Test Reports. + +## __AWS Elastic Beanstalk__ + - ### Features + - Updates the Integer constraint of DescribeEnvironmentManagedActionHistory's MaxItems parameter to [1, 100]. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for Vorbis and Opus audio in OGG/OGA containers. + +## __AWS IoT SiteWise__ + - ### Features + - This release adds support for customer managed customer master key (CMK) based encryption in IoT SiteWise. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Step Functions__ + - ### Features + - This release of the AWS Step Functions SDK introduces support for Synchronous Express Workflows + +## __Amazon Appflow__ + - ### Features + - Upsolver as a destination connector and documentation update. + +## __Amazon Cognito Identity Provider__ + - ### Features + - This release adds ability to configure Cognito User Pools with third party sms and email providers for sending notifications to users. + +## __Amazon Comprehend__ + - ### Features + - Support Comprehend events detection APIs + +## __Amazon FSx__ + - ### Features + - This release adds the capability to increase storage capacity of Amazon FSx for Lustre file systems, providing the flexibility to meet evolving storage needs over time. + +## __Amazon GameLift__ + - ### Features + - GameLift FlexMatch is now available as a standalone matchmaking solution. FlexMatch now provides customizable matchmaking for games hosted peer-to-peer, on-premises, or on cloud compute primitives. + +## __Amazon Lex Model Building Service__ + - ### Features + - Lex now supports es-419, de-DE locales + +## __Amazon QuickSight__ + - ### Features + - Support for embedding without user registration. New enum EmbeddingIdentityType. A potential breaking change. Affects code that refers IdentityType enum type directly instead of literal string value. 
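To illustrate the AWS Step Functions entry in 2.15.35 above, the sketch below starts a Synchronous Express Workflow execution and reads its result from the same call. The state machine ARN and input are placeholders, and the response accessors shown are assumptions; treat this as an outline rather than a reference.

```java
import software.amazon.awssdk.services.sfn.SfnClient;
import software.amazon.awssdk.services.sfn.model.StartSyncExecutionRequest;
import software.amazon.awssdk.services.sfn.model.StartSyncExecutionResponse;

public class SyncExpressWorkflow {
    public static void main(String[] args) {
        try (SfnClient sfn = SfnClient.create()) {
            // Placeholder ARN for an existing Express state machine.
            StartSyncExecutionResponse result = sfn.startSyncExecution(StartSyncExecutionRequest.builder()
                    .stateMachineArn("arn:aws:states:us-east-1:123456789012:stateMachine:MyExpressWorkflow")
                    .input("{\"orderId\":\"42\"}")
                    .build());
            // The call blocks until the execution finishes, so status and output are final.
            System.out.println(result.statusAsString() + ": " + result.output());
        }
    }
}
```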
+ +## __Amazon Timestream Write__ + - ### Features + - Adds support of upserts for idempotent updates to Timestream. + +## __Amazon Transcribe Streaming Service__ + - ### Features + - Amazon Transcribe Medical streaming added medical specialties and HTTP/2 support. Amazon Transcribe streaming supports additional languages. Both support OGG/OPUS and FLAC codecs for streaming. + +## __AmazonMWAA__ + - ### Features + - (New Service) Amazon MWAA is a managed service for Apache Airflow that makes it easy for data engineers and data scientists to execute data processing workflows in the cloud. + +# __2.15.34__ __2020-11-23__ +## __AWS CodeStar connections__ + - ### Features + - Added support for the UpdateHost API. + +## __AWS Glue__ + - ### Features + - Feature1 - Glue crawler adds data lineage configuration option. Feature2 - AWS Glue Data Catalog adds APIs for PartitionIndex creation and deletion as part of Enhancement Partition Management feature. + +## __AWS IoT__ + - ### Features + - This release enables users to identify different file types in the over-the-air update (OTA) functionality using fileType parameter for CreateOTAUpdate API + +## __AWS Lambda__ + - ### Features + - This release includes support for new feature: Code Signing for AWS Lambda. This adds new resources and APIs to configure Lambda functions to accept and verify signed code artifacts at deployment. + +## __AWS License Manager__ + - ### Features + - AWS License Manager now provides the ability for license administrators to be able to associate license configurations to AMIs shared with their AWS account + +## __AWS Outposts__ + - ### Features + - Support specifying tags during the creation of the Outpost resource. Tags are now returned in the response body of Outpost APIs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Updated the account management API to support the integration with AWS Organizations. Added new methods to allow users to view and manage the delegated administrator account for Security Hub. + +## __AWS Signer__ + - ### Features + - AWS Signer is launching code-signing for AWS Lambda. Now customers can cryptographically sign Lambda code to ensure trust, integrity, and functionality. + +## __AWS Single Sign-On Admin__ + - ### Features + - AWS Single Sign-On now enables attribute-based access control for workforce identities to simplify permissions in AWS + +## __Amazon CloudWatch Application Insights__ + - ### Features + - Add Detected Workload to ApplicationComponent which shows the workloads that installed in the component + +## __Amazon DynamoDB__ + - ### Features + - With this release, you can capture data changes in any Amazon DynamoDB table as an Amazon Kinesis data stream. You also can use PartiQL (SQL-compatible language) to manipulate data in DynamoDB tables. + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for updating capacity providers, specifying custom instance warmup periods for capacity providers, and using deployment circuit breaker for your ECS Services. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for Multiple Private DNS names to DescribeVpcEndpointServices response. 
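As a minimal sketch of the PartiQL support mentioned in the Amazon DynamoDB entry of 2.15.34 above (the "Orders" table and its "status" attribute are hypothetical):

```java
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementRequest;
import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementResponse;

public class PartiQlQuery {
    public static void main(String[] args) {
        try (DynamoDbClient ddb = DynamoDbClient.create()) {
            // "Orders" and "status" are placeholders used purely for illustration.
            ExecuteStatementResponse response = ddb.executeStatement(ExecuteStatementRequest.builder()
                    .statement("SELECT * FROM \"Orders\" WHERE \"status\" = 'SHIPPED'")
                    .build());
            response.items().forEach(System.out::println);
        }
    }
}
```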
+ +## __Amazon Elastic MapReduce__ + - ### Features + - Add API support for EMR Studio, a new notebook-first IDE for data scientists and data engineers with single sign-on, Jupyter notebooks, automated infrastructure provisioning, and job diagnosis. + +## __Amazon Forecast Service__ + - ### Features + - Releasing the set of PredictorBacktestExportJob APIs which allow customers to export backtest values and item-level metrics data from Predictor training. + +## __Amazon Timestream Query__ + - ### Features + - Amazon Timestream now supports "QueryStatus" in Query API which has information about cumulative bytes scanned, metered, as well as progress percentage for the query. + +## __Amazon Translate__ + - ### Features + - This update adds new operations to create and manage parallel data in Amazon Translate. Parallel data is a resource that you can use to run Active Custom Translation jobs. + +## __Auto Scaling__ + - ### Features + - Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs. + +## __CodeArtifact__ + - ### Features + - Add support for the NuGet package format. + +## __Managed Streaming for Kafka__ + - ### Features + - Adding MAINTENANCE and REBOOTING_BROKER to Cluster states. + +# __2.15.33__ __2020-11-20__ +## __AWS App Mesh__ + - ### Features + - This release makes tag value a required attribute of the tag's key-value pair. + +## __AWS CloudHSM V2__ + - ### Features + - Added managed backup retention, a feature that enables customers to retain backups for a configurable period after which CloudHSM service will automatically delete them. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog App Registry__ + - ### Features + - AWS Service Catalog AppRegistry Documentation update + +## __AWS Single Sign-on__ + - ### Features + - Added support for retrieving SSO credentials: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html. + +## __Amazon Chime__ + - ### Features + - The Amazon Chime SDK for messaging provides the building blocks needed to build chat and other real-time collaboration features. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - This release supports tagging repository association resources in Amazon CodeGuru Reviewer. + +## __Amazon Cognito Identity__ + - ### Features + - Added SDK pagination support for ListIdentityPools + +## __Amazon Connect Service__ + - ### Features + - This release adds a set of Amazon Connect APIs to programmatically control instance creation, modification, description and deletion. + +## __Amazon Macie 2__ + - ### Features + - The Amazon Macie API now provides S3 bucket metadata that indicates whether any one-time or recurring classification jobs are configured to analyze data in a bucket. + +## __Amazon Simple Storage Service__ + - ### Features + - Add new documentation regarding automatically generated Content-MD5 headers when using the SDK or CLI. + +## __Managed Streaming for Kafka__ + - ### Features + - This release adds support for PER TOPIC PER PARTITION monitoring on AWS MSK clusters. + +# __2.15.32__ __2020-11-19__ +## __AWS Cost Explorer Service__ + - ### Features + - Additional metadata that may be applicable to the recommendation. 
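The AWS Single Sign-on entry in 2.15.33 above means SSO-backed named profiles can now supply credentials to any service client. A minimal sketch, assuming a profile named `my-sso-profile` has already been configured for SSO and logged in with the AWS CLI, and that the `sso` module is on the classpath:

```java
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.services.s3.S3Client;

public class SsoProfileCredentials {
    public static void main(String[] args) {
        // "my-sso-profile" is a hypothetical profile in ~/.aws/config that was set up for AWS SSO.
        try (S3Client s3 = S3Client.builder()
                .credentialsProvider(ProfileCredentialsProvider.create("my-sso-profile"))
                .build()) {
            s3.listBuckets().buckets().forEach(bucket -> System.out.println(bucket.name()));
        }
    }
}
```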
+ +## __AWS Directory Service__ + - ### Features + - Adding multi-region replication feature for AWS Managed Microsoft AD + +## __AWS Elemental MediaLive__ + - ### Features + - The AWS Elemental MediaLive APIs and SDKs now support the ability to see the software update status on Link devices + +## __AWS Glue__ + - ### Features + - Adding support for Glue Schema Registry. The AWS Glue Schema Registry is a new feature that allows you to centrally discover, control, and evolve data stream schemas. + +## __AWS Lambda__ + - ### Features + - Added the starting position and starting position timestamp to ESM Configuration. Now customers will be able to view these fields for their ESM. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudWatch Events__ + - ### Features + - EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management. + +## __Amazon EventBridge__ + - ### Features + - EventBridge now supports Resource-based policy authorization on event buses. This enables cross-account PutEvents API calls, creating cross-account rules, and simplifies permission management. + +## __Amazon Kinesis Analytics__ + - ### Features + - Amazon Kinesis Data Analytics now supports building and running streaming applications using Apache Flink 1.11 and provides a way to access the Apache Flink dashboard for supported Flink versions. + +## __Amazon Lex Model Building Service__ + - ### Features + - Amazon Lex supports managing input and output contexts as well as default values for slots. + +## __Amazon Lex Runtime Service__ + - ### Features + - Amazon Lex now supports the ability to view and manage active contexts associated with a user session. + +## __Amazon Redshift__ + - ### Features + - Amazon Redshift support for returning ClusterNamespaceArn in describeClusters + +## __Auto Scaling__ + - ### Features + - You can now create Auto Scaling groups with multiple launch templates using a mixed instances policy, making it easy to deploy an AMI with an architecture that is different from the rest of the group. + +# __2.15.31__ __2020-11-18__ +## __AWS Backup__ + - ### Features + - AWS Backup now supports cross-account backup, enabling AWS customers to securely copy their backups across their AWS accounts within their AWS organizations. + +## __AWS CloudFormation__ + - ### Features + - This release adds ChangeSets support for Nested Stacks. ChangeSets offer a preview of how proposed changes to a stack might impact existing resources or create new ones. + +## __AWS CodeBuild__ + - ### Features + - AWS CodeBuild - Adding Status field for Report Group + +## __AWS Outposts__ + - ### Features + - Mark the Name parameter in CreateOutpost as required. + +## __AWS S3 Control__ + - ### Features + - AWS S3 Storage Lens provides visibility into your storage usage and activity trends at the organization or account level, with aggregations by Region, storage class, bucket, and prefix. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon ElastiCache__ + - ### Features + - Adding Memcached 1.6 to parameter family + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EC2 Fleet adds support of DeleteFleets API for instant type fleets. Now you can delete an instant type fleet and terminate all associated instances with a single API call. 
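For the EC2 Fleet entry directly above, a hedged sketch of deleting an instant-type fleet together with its instances in one call (the fleet id is a placeholder):

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DeleteFleetsRequest;
import software.amazon.awssdk.services.ec2.model.DeleteFleetsResponse;

public class DeleteInstantFleet {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            DeleteFleetsResponse response = ec2.deleteFleets(DeleteFleetsRequest.builder()
                    .fleetIds("fleet-0123456789abcdef0") // placeholder id of an instant-type fleet
                    .terminateInstances(true)            // terminate the associated instances as well
                    .build());
            response.successfulFleetDeletions().forEach(deletion ->
                    System.out.println(deletion.fleetId() + " -> " + deletion.currentFleetStateAsString()));
        }
    }
}
```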
+ +# __2.15.30__ __2020-11-17__ +## __AWS Network Firewall__ + - ### Features + - (New Service) AWS Network Firewall is a managed network layer firewall service that makes it easy to secure your virtual private cloud (VPC) networks and block malicious traffic. + +## __Amazon Chime__ + - ### Features + - This release adds CRUD APIs for Amazon Chime SipMediaApplications and SipRules. It also adds the API for creating outbound PSTN calls for Amazon Chime meetings. + +## __Amazon Connect Service__ + - ### Features + - This release adds support for user hierarchy group and user hierarchy structure. For details, see the Release Notes in the Amazon Connect Administrator Guide. + +## __Amazon Macie 2__ + - ### Features + - The Amazon Macie API now has a lastRunErrorStatus property to indicate if account- or bucket-level errors occurred during the run of a one-time classification job or the latest run of a recurring job. + +## __Amazon Relational Database Service__ + - ### Features + - Support copy-db-snapshot in the one region on cross clusters and local cluster for RDSonVmware. Add target-custom-availability-zone parameter to specify where a snapshot should be copied. + +## __Firewall Management Service__ + - ### Features + - Added Firewall Manager policy support for AWS Network Firewall resources. + +# __2.15.29__ __2020-11-16__ +## __AWS CodePipeline__ + - ### Features + - We show details about inbound executions and id of action executions in GetPipelineState API. We also add ConflictException to StartPipelineExecution, RetryStageExecution, StopPipelineExecution APIs. + +## __AWS Database Migration Service__ + - ### Features + - Adding MoveReplicationTask feature to move replication tasks between instances + +## __AWS IoT Secure Tunneling__ + - ### Features + - Support using multiple data streams per tunnel using the Secure Tunneling multiplexing feature. + +## __AWS IoT SiteWise__ + - ### Features + - This release supports Unicode characters for string operations in formulae computes in SiteWise. For more information, search for SiteWise in Amazon What's new or refer the SiteWise documentation. + +## __AWS Service Catalog__ + - ### Features + - Support import of CloudFormation stacks into Service Catalog provisioned products. + +## __Amazon QuickSight__ + - ### Features + - Adding new parameters for dashboard persistence + +## __Amazon SageMaker Service__ + - ### Features + - This feature enables customers to encrypt their Amazon SageMaker Studio storage volumes with customer master keys (CMKs) managed by them in AWS Key Management Service (KMS). + +## __Amazon Simple Notification Service__ + - ### Features + - Documentation updates for Amazon SNS. + +## __Synthetics__ + - ### Features + - AWS Synthetics now supports Environment Variables to assign runtime parameters in the canary scripts. + +# __2.15.28__ __2020-11-13__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Shield__ + - ### Features + - This release adds APIs for two new features: 1) Allow customers to bundle resources into protection groups and treat as a single unit. 2) Provide per-account event summaries to all AWS customers. + +## __Amazon Textract__ + - ### Features + - AWS Textract now allows customers to specify their own KMS key to be used for asynchronous jobs output results, AWS Textract now also recognizes handwritten text from English documents. 
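The AWS CodePipeline entry in 2.15.29 above enriches the GetPipelineState response (inbound executions, action execution ids). A small sketch of calling it, with a placeholder pipeline name; the additional details ride along on the same stage-state objects:

```java
import software.amazon.awssdk.services.codepipeline.CodePipelineClient;
import software.amazon.awssdk.services.codepipeline.model.GetPipelineStateRequest;
import software.amazon.awssdk.services.codepipeline.model.GetPipelineStateResponse;

public class PipelineState {
    public static void main(String[] args) {
        try (CodePipelineClient codePipeline = CodePipelineClient.create()) {
            // "my-pipeline" is a placeholder for an existing pipeline name.
            GetPipelineStateResponse state = codePipeline.getPipelineState(
                    GetPipelineStateRequest.builder().name("my-pipeline").build());
            state.stageStates().forEach(stage ->
                    System.out.println(stage.stageName() + ": " + stage.latestExecution()));
        }
    }
}
```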
+ +## __Elastic Load Balancing__ + - ### Features + - Adds dualstack support for Network Load Balancers (TCP/TLS only), an attribute for WAF fail open for Application Load Balancers, and an attribute for connection draining for Network Load Balancers. + +# __2.15.27__ __2020-11-12__ +## __AWS IoT__ + - ### Features + - This release adds a batchMode parameter to the IotEvents, IotAnalytics, and Firehose actions which allows customers to send an array of messages to the corresponding services + +## __AWS RoboMaker__ + - ### Features + - This release introduces Robomaker Worldforge TagsOnCreate which allows customers to tag worlds as they are being generated by providing the tags while configuring a world generation job. + +## __AWS Service Catalog App Registry__ + - ### Features + - AWS Service Catalog AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise. + +## __Amazon Lex Model Building Service__ + - ### Features + - Lex now supports es-ES, it-IT, fr-FR and fr-CA locales + +## __Amazon Lightsail__ + - ### Features + - This release adds support for Amazon Lightsail container services. You can now create a Lightsail container service, and deploy Docker images to it. + +## __Amazon Personalize Runtime__ + - ### Features + - Adds support to use dynamic filters with Personalize. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds new Australian English female voice - Olivia. Olivia is available as Neural voice only. + +# __2.15.26__ __2020-11-11__ +## __AWS Amplify__ + - ### Features + - Whereas previously custom headers were set via the app's buildspec, custom headers can now be set directly on the Amplify app for both ci/cd and manual deploy apps. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for Automated ABR encoding and improved the reliability of embedded captions in accelerated outputs. + +## __AWS Glue DataBrew__ + - ### Features + - This is the initial SDK release for AWS Glue DataBrew. DataBrew is a visual data preparation tool that enables users to clean and normalize data without writing any code. + +## __AWS Service Catalog__ + - ### Features + - Adding support to remove a Provisioned Product launch role via UpdateProvisionedProductProperties + +## __Amazon Forecast Service__ + - ### Features + - Providing support of custom quantiles in CreatePredictor API. + +## __Amazon QuickSight__ + - ### Features + - QuickSight now supports Column-level security and connecting to Oracle data source. + +## __Netty NIO HTTP Client__ + - ### Features + - Upgrade Netty libraries to `4.1.53.Final`, and `netty-tcnative-boringssl-static` to `2.0.34.Final`. + + - ### Bugfixes + - Fix a bug where the Netty HTTP client can leak memory when a response stream is cancelled prematurely but the upstream publisher continues to invoke onNext for some time before stopping. Fixes [#2051](https://github.com/aws/aws-sdk-java-v2/issues/2051). + +# __2.15.25__ __2020-11-10__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix default client error to have spaces between words. + - Contributed by: @frosforever + - Replaced class loading from Thread.currentThread().getContextClassLoader() to ClassLoaderHelper in ProfileCredentialsUtils and WebIdentityCredentialsUtils, since it was causing Class not found error. 
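The Netty NIO HTTP Client entries in 2.15.26 above (library upgrade and the cancelled-stream leak fix) concern the default async transport. A sketch of configuring it explicitly on an async client; the values are arbitrary examples, not recommendations:

```java
import java.time.Duration;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
import software.amazon.awssdk.services.s3.S3AsyncClient;

public class NettyClientConfig {
    public static void main(String[] args) {
        try (S3AsyncClient s3 = S3AsyncClient.builder()
                .httpClientBuilder(NettyNioAsyncHttpClient.builder()
                        .maxConcurrency(64)                        // arbitrary example value
                        .connectionTimeout(Duration.ofSeconds(5))) // arbitrary example value
                .build()) {
            s3.listBuckets().join().buckets().forEach(bucket -> System.out.println(bucket.name()));
        }
    }
}
```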
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for Gateway Load Balancer VPC endpoints and VPC endpoint services + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Add SessionId as a filter for DescribeSessions API + +## __Auto Scaling__ + - ### Features + - Documentation updates and corrections for Amazon EC2 Auto Scaling API Reference and SDKs. + +## __Elastic Load Balancing__ + - ### Features + - Added support for Gateway Load Balancers, which make it easy to deploy, scale, and run third-party virtual networking appliances. + +## __Contributors__ +Special thanks to the following contributors to this release: + +@frosforever +# __2.15.24__ __2020-11-09__ +## __AWS DataSync__ + - ### Features + - DataSync now enables customers to adjust the network bandwidth used by a running AWS DataSync task. + +## __AWS IoT Analytics__ + - ### Features + - AWS IoT Analytics now supports Late Data Notifications for datasets, dataset content creation using previous version IDs, and includes the LastMessageArrivalTime attribute for channels and datastores. + +## __AWS Storage Gateway__ + - ### Features + - Added bandwidth rate limit schedule for Tape and Volume Gateways + +## __Amazon DynamoDB__ + - ### Features + - This release adds supports for exporting Amazon DynamoDB table data to Amazon S3 to perform analytics at any scale. + +## __Amazon EC2 Container Service__ + - ### Features + - This release provides native support for specifying Amazon FSx for Windows File Server file systems as volumes in your Amazon ECS task definitions. + +## __Amazon Elasticsearch Service__ + - ### Features + - Adding support for package versioning in Amazon Elasticsearch Service + +## __Amazon FSx__ + - ### Features + - This release adds support for creating DNS aliases for Amazon FSx for Windows File Server, and using AWS Backup to automate scheduled, policy-driven backup plans for Amazon FSx file systems. + +## __Amazon Macie 2__ + - ### Features + - Sensitive data findings in Amazon Macie now include enhanced location data for Apache Avro object containers and Apache Parquet files. + +## __Amazon Simple Storage Service__ + - ### Features + - S3 Intelligent-Tiering adds support for Archive and Deep Archive Access tiers; S3 Replication adds replication metrics and failure notifications, brings feature parity for delete marker replication + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - add a new filter to allow customer to filter automation executions by using resource-group which used for execute automation + +# __2.15.23__ __2020-11-06__ +## __AWS Elemental MediaLive__ + - ### Features + - Support for SCTE35 ad markers in OnCuePoint style in RTMP outputs. + +## __AWS IoT SiteWise__ + - ### Features + - Remove the CreatePresignedPortalUrl API + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Amazon Data Lifecycle Manager now supports the creation and retention of EBS-backed Amazon Machine Images + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Network card support with four new attributes: NetworkCardIndex, NetworkPerformance, DefaultNetworkCardIndex, and MaximumNetworkInterfaces, added to the DescribeInstanceTypes API. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Documentation updates for Systems Manager + +# __2.15.22__ __2020-11-05__ +## __AWS App Mesh__ + - ### Features + - This release adds circuit breaking capabilities to your mesh with connection pooling and outlier detection support. 
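For the Amazon DynamoDB entry in 2.15.24 above (table export to Amazon S3), a hedged sketch: the table ARN and bucket are placeholders, point-in-time recovery is assumed to be enabled on the table, and the exact response accessors shown are assumptions.

```java
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.ExportFormat;
import software.amazon.awssdk.services.dynamodb.model.ExportTableToPointInTimeRequest;
import software.amazon.awssdk.services.dynamodb.model.ExportTableToPointInTimeResponse;

public class ExportTableToS3 {
    public static void main(String[] args) {
        try (DynamoDbClient ddb = DynamoDbClient.create()) {
            // Placeholder table ARN and bucket; point-in-time recovery must be enabled on the table.
            ExportTableToPointInTimeResponse export = ddb.exportTableToPointInTime(
                    ExportTableToPointInTimeRequest.builder()
                            .tableArn("arn:aws:dynamodb:us-east-1:123456789012:table/Orders")
                            .s3Bucket("my-export-bucket")
                            .exportFormat(ExportFormat.DYNAMODB_JSON)
                            .build());
            System.out.println("Export started: " + export.exportDescription().exportArn());
        }
    }
}
```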
+ +## __AWS Lambda__ + - ### Features + - Support Amazon MQ as an Event Source. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now supports providing user context in your query requests, Tokens can be JSON or JWT format. This release also introduces support for Confluence cloud datasources. + +## __Amazon CloudWatch Events__ + - ### Features + - With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive. + +## __Amazon DynamoDB__ + - ### Features + - This release adds a new ReplicaStatus INACCESSIBLE_ENCRYPTION_CREDENTIALS for the Table description, indicating when a key used to encrypt a regional replica table is not accessible. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for EC2. + +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now provides the ability to define a custom endpoint for your domain and link an SSL certificate from ACM, making it easier to refer to Kibana and the domain endpoint. + +## __Amazon EventBridge__ + - ### Features + - With this release, customers can now reprocess past events by storing the events published on event bus in an encrypted archive. + +## __Amazon Fraud Detector__ + - ### Features + - Added support for deleting resources like Variables, ExternalModels, Outcomes, Models, ModelVersions, Labels, EventTypes and EntityTypes. Updated DeleteEvent operation to catch missing exceptions. + +## __Amazon Relational Database Service__ + - ### Features + - Supports a new parameter to set the max allocated storage in gigabytes for the CreateDBInstanceReadReplica API. + +# __2.15.21__ __2020-11-04__ +## __AWS IoT__ + - ### Features + - Updated API documentation and added paginator for AWS Iot Registry ListThingPrincipals API. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - Service Catalog API ListPortfolioAccess can now support a maximum PageSize of 100. + +## __AWS X-Ray__ + - ### Features + - Releasing new APIs GetInsightSummaries, GetInsightEvents, GetInsight, GetInsightImpactGraph and updating GetTimeSeriesServiceStatistics API for AWS X-Ray Insights feature + +## __AWSMarketplace Metering__ + - ### Features + - Adding Vendor Tagging Support in MeterUsage and BatchMeterUsage API. + +## __Amazon CloudWatch__ + - ### Features + - Documentation updates for monitoring + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added support for Client Connect Handler for AWS Client VPN. Fleet supports launching replacement instances in response to Capacity Rebalance recommendation. + +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now supports native SAML authentication that seamlessly integrates with the customers' existing SAML 2.0 Identity Provider (IdP). + +## __Amazon Transcribe Streaming Service__ + - ### Features + - With this release, Amazon Transcribe now supports real-time transcription from audio sources in Italian (it-IT) and German(de-DE). + +## __AmazonMQ__ + - ### Features + - Amazon MQ introduces support for RabbitMQ, a popular message-broker with native support for AMQP 0.9.1. You can now create fully-managed RabbitMQ brokers in the cloud. 
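The Amazon EventBridge and Amazon CloudWatch Events entries in 2.15.22 above describe storing bus events in an archive for later replay. A tentative sketch of creating one; the archive name and event bus ARN are placeholders, and the member names are assumptions based on the CreateArchive API:

```java
import software.amazon.awssdk.services.eventbridge.EventBridgeClient;
import software.amazon.awssdk.services.eventbridge.model.CreateArchiveRequest;
import software.amazon.awssdk.services.eventbridge.model.CreateArchiveResponse;

public class ArchiveBusEvents {
    public static void main(String[] args) {
        try (EventBridgeClient events = EventBridgeClient.create()) {
            // Placeholder archive name and event bus ARN; 30-day retention is an arbitrary choice.
            CreateArchiveResponse archive = events.createArchive(CreateArchiveRequest.builder()
                    .archiveName("orders-archive")
                    .eventSourceArn("arn:aws:events:us-east-1:123456789012:event-bus/default")
                    .retentionDays(30)
                    .build());
            System.out.println("Created archive: " + archive.archiveArn());
        }
    }
}
```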
+ +## __Auto Scaling__ + - ### Features + - Capacity Rebalance helps you manage and maintain workload availability during Spot interruptions by proactively augmenting your Auto Scaling group with a new instance before interrupting an old one. + +# __2.15.20__ __2020-11-02__ +## __AWS DynamoDB Enhanced Client__ + - ### Bugfixes + - Publisher streams returned by async resources in the DynamoDB Enhanced Client now correctly handle mapping errors when they are encountered in the stream by calling onError on the subscriber and then implicitly cancelling the subscription. Previously the stream would just permanently hang and never complete. + +## __AWS SDK for Java v2__ + - ### Features + - Added code generation validation that customer-visible identifiers are idiomatic (do not contain underscores). Services with underscores in their models can use rename customizations to fix these issues, or apply the 'underscoresInNameBehavior = ALLOW' customization. + - Upgrade `org.apache.httpcomponents:httpclient` version to `4.5.13` + + - ### Bugfixes + - Fixing race condition in EventStreamAsyncResponseTransformer. Field eventsToDeliver is a LinkedList, i.e., not thread-safe. Accesses to field eventsToDeliver are protected by synchronization on itself, but not in 1 location. + - The mapped publisher returned by SdkPublisher.map will now handle exceptions thrown by the mapping function by calling onError on its subscriber and then cancelling the subscription rather than throwing it back to the publishing process when it attempts to publish data. + +## __AWS SSO OIDC__ + - ### Deprecations + - Renamed/deprecated 'error_description' fields in exceptions in favor of 'errorDescription'. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for the following features: 1. P4d instances based on NVIDIA A100 GPUs. 2. NetworkCardIndex attribute to support multiple network cards. + +# __2.15.19__ __2020-10-30__ +## __AWS Database Migration Service__ + - ### Features + - Adding DocDbSettings to support DocumentDB as a source. + +## __AWS Elemental MediaLive__ + - ### Features + - Support for HLS discontinuity tags in the child manifests. Support for incomplete segment behavior in the media output. Support for automatic input failover condition settings. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fixing FilteringSubscriber and LimitingSubscriber to complete when subscribing criteria is completed. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for AWS ElastiCache + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API adds an eqExactMatch operator for filtering findings. With this operator you can increase the precision of your finding filters and suppression rules. + +## __Amazon Simple Notification Service__ + - ### Features + - Documentation updates for Amazon SNS + +## __Braket__ + - ### Features + - This release supports tagging for Amazon Braket quantum-task resources. It also supports tag-based access control for quantum-task APIs. + +## __EC2 Image Builder__ + - ### Features + - This feature increases the number of accounts that can be added to the Launch permissions within an Image Builder Distribution configuration. + +# __2.15.18__ __2020-10-29__ +## __AWS Marketplace Commerce Analytics__ + - ### Features + - Documentation updates for marketplacecommerceanalytics to specify four data sets which are deprecated. 
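The SDK bugfixes in 2.15.20 and 2.15.19 above concern the `map`, `filter`, and limiting operators on `SdkPublisher`, which most users meet through async paginators. A sketch of that usage pattern; the "Orders" table and "status" attribute are placeholders:

```java
import java.util.concurrent.CompletableFuture;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;

public class PublisherOperators {
    public static void main(String[] args) {
        try (DynamoDbAsyncClient ddb = DynamoDbAsyncClient.create()) {
            // items() exposes an SdkPublisher; map/filter/limit are the operators covered by the fixes above.
            CompletableFuture<Void> done = ddb
                    .scanPaginator(ScanRequest.builder().tableName("Orders").build())
                    .items()
                    .filter(item -> item.containsKey("status"))
                    .map(item -> item.get("status").s())
                    .limit(10)
                    .subscribe(status -> System.out.println(status));
            done.join();
        }
    }
}
```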
+ +## __AWS Storage Gateway__ + - ### Features + - Adding support for access based enumeration on SMB file shares, file share visibility on SMB file shares, and file upload notifications for all file shares + +## __Amazon API Gateway__ + - ### Features + - Support disabling the default execute-api endpoint for REST APIs. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Support for Appliance mode on Transit Gateway that simplifies deployment of stateful network appliances. Added support for AWS Client VPN Self-Service Portal. + +## __Amazon Simple Email Service__ + - ### Features + - This release enables customers to manage their own contact lists and end-user subscription preferences. + +## __CodeArtifact__ + - ### Features + - Add support for tagging of CodeArtifact domain and repository resources. + +## __Elastic Load Balancing__ + - ### Features + - Application Load Balancer (ALB) now supports the gRPC protocol-version. With this release, customers can use ALB to route and load balance gRPC traffic between gRPC enabled clients and microservices. + +# __2.15.17__ __2020-10-28__ +## __AWS IoT__ + - ### Features + - This release adds support for GG-Managed Job Namespace + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where marshalling of a modeled object was not honoring the has* method on a list/map. + - Fixed an issue where the toString/equals/hashCode on a modeled object were not honoring the has* methods for lists and maps. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - AWS Nitro Enclaves general availability. Added support to RunInstances for creating enclave-enabled EC2 instances. New APIs to associate an ACM certificate with an IAM role, for enclave consumption. + +## __Amazon WorkMail__ + - ### Features + - Documentation update for Amazon WorkMail + +# __2.15.16__ __2020-10-27__ +## __AWS Glue__ + - ### Features + - AWS Glue machine learning transforms now support encryption-at-rest for labels and trained models. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.15.15__ __2020-10-26__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra now supports indexing data from Confluence Server. + +## __Amazon Neptune__ + - ### Features + - This feature enables custom endpoints for Amazon Neptune clusters. Custom endpoints simplify connection management when clusters contain instances with different capacities and configuration settings. + +## __Amazon SageMaker Service__ + - ### Features + - This release enables customers to bring custom images for use with SageMaker Studio notebooks. + +# __2.15.14__ __2020-10-23__ +## __AWS MediaTailor__ + - ### Features + - MediaTailor now supports ad marker passthrough for HLS. Use AdMarkerPassthrough to pass EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 from origin manifests into personalized manifests. + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API includes miscellaneous updates and improvements to the documentation. + +## __Amazon QuickSight__ + - ### Features + - Support description on columns. + +# __2.15.13__ __2020-10-22__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
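The two 2.15.17 bugfixes above refer to the generated `has*` accessors on modeled objects. Because collection getters on responses never return null, `has*` is how callers distinguish an empty list from an absent field; a short sketch against a hypothetical DynamoDB table:

```java
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;
import software.amazon.awssdk.services.dynamodb.model.ScanResponse;

public class HasMethodCheck {
    public static void main(String[] args) {
        try (DynamoDbClient ddb = DynamoDbClient.create()) {
            // "Orders" is a placeholder table name.
            ScanResponse response = ddb.scan(ScanRequest.builder().tableName("Orders").limit(1).build());
            if (response.hasItems()) {
                System.out.println("Service returned " + response.items().size() + " item(s)");
            } else {
                System.out.println("The Items field was not present in the response");
            }
        }
    }
}
```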
+ +## __AWS Service Catalog__ + - ### Features + - Documentation updates for servicecatalog + +## __Access Analyzer__ + - ### Features + - API Documentation updates for IAM Access Analyzer. + +## __Amazon Appflow__ + - ### Features + - Salesforce connector creation with customer provided client id and client secret, incremental pull configuration, salesforce upsert write operations and execution ID when on-demand flows are executed. + +## __Amazon Simple Notification Service__ + - ### Features + - SNS now supports a new class of topics: FIFO (First-In-First-Out). FIFO topics provide strictly-ordered, deduplicated, filterable, encryptable, many-to-many messaging at scale. + +# __2.15.12__ __2020-10-21__ +## __AWS Global Accelerator__ + - ### Features + - This release adds support for specifying port overrides on AWS Global Accelerator endpoint groups. + +## __AWS Glue__ + - ### Features + - AWS Glue crawlers now support incremental crawls for the Amazon Simple Storage Service (Amazon S3) data source. + +## __AWS Organizations__ + - ### Features + - AWS Organizations renamed the 'master account' to 'management account'. + +## __AWSKendraFrontendService__ + - ### Features + - This release adds custom data sources: a new data source type that gives you full control of the documents added, modified or deleted during a data source sync while providing run history metrics. + +## __Amazon CloudFront__ + - ### Features + - CloudFront adds support for managing the public keys for signed URLs and signed cookies directly in CloudFront (it no longer requires the AWS root account). + +## __Amazon Elastic Compute Cloud__ + - ### Features + - instance-storage-info nvmeSupport added to DescribeInstanceTypes API + +# __2.15.11__ __2020-10-20__ +## __AWS AppSync__ + - ### Features + - Documentation updates to AppSync to correct several typos. + +## __AWS Batch__ + - ### Features + - Adding evaluateOnExit to job retry strategies. + +## __AWS Elastic Beanstalk__ + - ### Features + - EnvironmentStatus enum update to include Aborting, LinkingFrom and LinkingTo + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where requestBody and asyncRequestBody were not visible in ExecutionInterceptor.afterMarshalling. + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fix for handling special characters in attribute names with WRITE_IF_NOT_EXISTS update behavior + +## __Amazon S3__ + - ### Features + - Moved the logic for calculating the Content-MD5 checksums from s3 to sdk-core. As always, make sure to use a version of 'sdk-core' greater than or equal to your version of 's3'. If you use an old version of 'sdk-core' and a new version of 's3', you will receive errors that Content-MD5 is required. + + - ### Bugfixes + - Fixed an issue where metrics were not being collected for Amazon S3 (or other XML services) + +# __2.15.10__ __2020-10-19__ +## __AWS Backup__ + - ### Features + - Documentation updates for Cryo + +## __AWS Service Catalog__ + - ### Features + - An Admin can now update the launch role associated with a Provisioned Product. Admins and End Users can now view the launch role associated with a Provisioned Product. + +## __Amazon CloudFront__ + - ### Features + - Amazon CloudFront adds support for Origin Shield. 
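As an illustration of the Amazon SNS entry in 2.15.13 above, the sketch below creates a FIFO topic by passing the documented topic attributes; the topic name is a placeholder and must end in `.fifo`:

```java
import java.util.HashMap;
import java.util.Map;
import software.amazon.awssdk.services.sns.SnsClient;
import software.amazon.awssdk.services.sns.model.CreateTopicRequest;
import software.amazon.awssdk.services.sns.model.CreateTopicResponse;

public class CreateFifoTopic {
    public static void main(String[] args) {
        try (SnsClient sns = SnsClient.create()) {
            Map<String, String> attributes = new HashMap<>();
            attributes.put("FifoTopic", "true");                 // marks the topic as FIFO
            attributes.put("ContentBasedDeduplication", "true"); // optional deduplication setting
            CreateTopicResponse topic = sns.createTopic(CreateTopicRequest.builder()
                    .name("orders.fifo") // placeholder name; FIFO topic names must end in ".fifo"
                    .attributes(attributes)
                    .build());
            System.out.println("Created topic: " + topic.topicArn());
        }
    }
}
```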
+ +## __Amazon DocumentDB with MongoDB compatibility__ + - ### Features + - Documentation updates for docdb + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This Patch Manager release now supports Common Vulnerabilities and Exposure (CVE) Ids for missing packages via the DescribeInstancePatches API. + +## __HTTP Client SPI__ + - ### Features + - Calling the SdkHttpFullRequest uri() builder method, query parameters of the provided URI will be kept. + This can be useful in case you want to provide an already fully formed URI like a callback URI. + +# __2.15.9__ __2020-10-16__ +## __AWS Elemental MediaLive__ + - ### Features + - The AWS Elemental MediaLive APIs and SDKs now support the ability to transfer the ownership of MediaLive Link devices across AWS accounts. + +## __AWS Organizations__ + - ### Features + - Documentation updates for AWS Organizations. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.15.8__ __2020-10-15__ +## __AWS Budgets__ + - ### Features + - This release introduces AWS Budgets Actions, allowing you to define an explicit response(or set of responses) to take when your budget exceeds it's action threshold. + +## __AWS Cost Explorer Service__ + - ### Features + - This release improves email validation for subscriptions on the SDK endpoints. + +## __AWS Database Migration Service__ + - ### Features + - When creating Endpoints, Replication Instances, and Replication Tasks, the feature provides you the option to specify friendly name to the resources. + +## __AWS Glue__ + - ### Features + - API Documentation updates for Glue Get-Plan API + +## __AWS Ground Station__ + - ### Features + - Adds error message attribute to DescribeContact DataflowDetails + +## __AWS IoT__ + - ### Features + - Add new variable, lastStatusChangeDate, to DescribeDomainConfiguration API + +## __AWS Lambda Maven Archetype__ + - ### Bugfixes + - Fixed an issue where archetype generation failed with latest maven-archetype-plugin. See [#1981](https://github.com/aws/aws-sdk-java-v2/issues/1981) + +## __AWS SDK for Java v2__ + - ### Features + - Add support for plus (+) character in profile names + - Updated service endpoint metadata. + +## __AWS Transfer Family__ + - ### Features + - Add support to associate VPC Security Groups at server creation. + +## __AWS X-Ray__ + - ### Features + - Enhancing CreateGroup, UpdateGroup, GetGroup and GetGroups APIs to support configuring X-Ray Insights Notifications. Adding TraceLimit information into X-Ray BatchGetTraces API response. + +## __Access Analyzer__ + - ### Features + - This release adds support for the ApplyArchiveRule api in IAM Access Analyzer. The ApplyArchiveRule api allows users to apply an archive rule retroactively to existing findings in an analyzer. + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API adds support for pausing and resuming classification jobs. Also, sensitive data findings now include location data for up to 15 occurrences of sensitive data. + +## __Amazon Rekognition__ + - ### Features + - This SDK Release introduces new API (DetectProtectiveEquipment) for Amazon Rekognition. This release also adds ServiceQuotaExceeded exception to Amazon Rekognition IndexFaces API. + +## __Amazon Relational Database Service__ + - ### Features + - Return tags for all resources in the output of DescribeDBInstances, DescribeDBSnapshots, DescribeDBClusters, and DescribeDBClusterSnapshots API operations. 
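The HTTP Client SPI entry in 2.15.10 above is about `SdkHttpFullRequest.builder().uri(...)` retaining the query string of the URI it is given. A small sketch; the callback URL is a placeholder:

```java
import java.net.URI;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.http.SdkHttpMethod;

public class UriQueryParams {
    public static void main(String[] args) {
        // Query parameters embedded in the URI survive the build, which is handy when a
        // fully formed URI (such as a callback URI) is all you have.
        SdkHttpFullRequest request = SdkHttpFullRequest.builder()
                .uri(URI.create("https://example.com/callback?token=abc&state=42"))
                .method(SdkHttpMethod.GET)
                .build();
        System.out.println(request.rawQueryParameters()); // e.g. {token=[abc], state=[42]}
    }
}
```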
+ +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This Patch Manager release now supports searching for available packages from Amazon Linux and Amazon Linux 2 via the DescribeAvailablePatches API. + +## __Amazon WorkMail__ + - ### Features + - Add CreateOrganization and DeleteOrganization API operations. + +## __Amazon WorkSpaces__ + - ### Features + - Documentation updates for WorkSpaces + # __2.15.7__ __2020-10-09__ ## __AWS Amplify__ - ### Features @@ -4834,6 +5892,7 @@ ## __Amazon Elastic Compute Cloud__ - ### Features - This release of Amazon Elastic Compute Cloud (Amazon EC2) introduces support for Amazon Elastic Block Store (Amazon EBS) fast snapshot restores. + - Upgrades to Instance Metadata Service version 2 (IMDS v2). With IMDS v2, a session token is used to make requests for EC2 instance metadata and credentials. ## __Amazon FSx__ - ### Features diff --git a/README.md b/README.md index 6d1f6bc4bcb9..56801935eced 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.15.7 + 2.15.39 pom import @@ -83,12 +83,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.15.7 + 2.15.39 software.amazon.awssdk s3 - 2.15.7 + 2.15.39 ``` @@ -100,7 +100,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.15.7 + 2.15.39 ``` @@ -132,6 +132,12 @@ You can find sample code for v2 in the following places: * [aws-doc-sdk-examples] repo. * Integration tests in this repo. They are located in the `it` directory under each service module, eg: [s3-integration-tests] +## Maintenance and Support for SDK Major Versions +For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide: + +* [AWS SDKs and Tools Maintenance Policy][maintenance-policy] +* [AWS SDKs and Tools Version Support Matrix][version-matrix] + ## Giving Feedback We need your help in making this SDK great. Please participate in the community and contribute to this effort by submitting issues, participating in discussion forums and submitting pull requests through the following channels: @@ -161,3 +167,5 @@ We need your help in making this SDK great. 
Please participate in the community [bom]: http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22software.amazon.awssdk%22%20AND%20a%3A%22bom%22 [aws-doc-sdk-examples]: https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/javav2 [s3-integration-tests]: https://github.com/aws/aws-sdk-java-v2/tree/master/services/s3/src/it/java/software/amazon/awssdk/services/s3 +[maintenance-policy]: https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html +[version-matrix]: https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 4cbb805e531b..f2ee36b2cbf9 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/pom.xml b/archetypes/pom.xml index bde44685ed5d..9b16828cc70f 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 6ba53e8f4e66..9db31c6c9d67 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../pom.xml aws-sdk-java @@ -1178,6 +1178,66 @@ Amazon AutoScaling, etc). s3outposts ${awsjavasdk.version} + + software.amazon.awssdk + databrew + ${awsjavasdk.version} + + + software.amazon.awssdk + servicecatalogappregistry + ${awsjavasdk.version} + + + software.amazon.awssdk + networkfirewall + ${awsjavasdk.version} + + + software.amazon.awssdk + mwaa + ${awsjavasdk.version} + + + software.amazon.awssdk + devopsguru + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakerfeaturestoreruntime + ${awsjavasdk.version} + + + software.amazon.awssdk + appintegrations + ${awsjavasdk.version} + + + software.amazon.awssdk + ecrpublic + ${awsjavasdk.version} + + + software.amazon.awssdk + amplifybackend + ${awsjavasdk.version} + + + software.amazon.awssdk + connectcontactlens + ${awsjavasdk.version} + + + software.amazon.awssdk + lookoutvision + ${awsjavasdk.version} + + + software.amazon.awssdk + customerprofiles + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index aff1190450c5..2011c90c086f 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index f29d383a2061..64c362c9f1c8 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../pom.xml bom @@ -1298,6 +1298,71 @@ s3outposts ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + + + software.amazon.awssdk + databrew + ${awsjavasdk.version} + + + software.amazon.awssdk + servicecatalogappregistry + ${awsjavasdk.version} + + + software.amazon.awssdk + networkfirewall + ${awsjavasdk.version} + + + software.amazon.awssdk + mwaa + ${awsjavasdk.version} + + + software.amazon.awssdk + devopsguru + ${awsjavasdk.version} + + + software.amazon.awssdk + sagemakerfeaturestoreruntime + ${awsjavasdk.version} + + + software.amazon.awssdk + appintegrations + ${awsjavasdk.version} + + + software.amazon.awssdk + ecrpublic + ${awsjavasdk.version} 
+ + + software.amazon.awssdk + amplifybackend + ${awsjavasdk.version} + + + software.amazon.awssdk + connectcontactlens + ${awsjavasdk.version} + + + software.amazon.awssdk + lookoutvision + ${awsjavasdk.version} + + + software.amazon.awssdk + customerprofiles + ${awsjavasdk.version} + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml index 394a616bb1ed..f4093089227f 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle-suppressions.xml @@ -42,4 +42,12 @@ + + + + + + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml index 05001c3fba7b..f02fa4c0f460 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml @@ -365,6 +365,16 @@ + + + + + + + + + @@ -373,6 +383,14 @@ + + + + + + + + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml index dd7633f47658..448b951a533d 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml @@ -212,8 +212,10 @@ + - - + + + diff --git a/bundle/pom.xml b/bundle/pom.xml index 7d6f1936123f..82380f3b5afb 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 3477de2cc99c..a071b0fd253d 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 84f4bb764c41..9434a1e8e4a2 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index 980a88f62b7c..6e1d6d25e5b6 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index 99d6d095e68d..d9d6248df79a 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codegen AWS Java SDK :: Code Generator diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddCustomAuthorizers.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddCustomAuthorizers.java deleted file mode 100644 index 011d5bb88dcd..000000000000 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddCustomAuthorizers.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.codegen; - -import java.util.Map; -import java.util.stream.Collectors; -import software.amazon.awssdk.codegen.model.intermediate.AuthorizerModel; -import software.amazon.awssdk.codegen.model.service.ServiceModel; -import software.amazon.awssdk.codegen.naming.NamingStrategy; - -public class AddCustomAuthorizers { - private final ServiceModel service; - private final NamingStrategy namingStrategy; - - public AddCustomAuthorizers(ServiceModel service, NamingStrategy namingStrategy) { - this.service = service; - this.namingStrategy = namingStrategy; - } - - public Map constructAuthorizers() { - return service.getAuthorizers().values().stream() - .map(a -> new AuthorizerModel(a.getName(), - namingStrategy.getAuthorizerClassName(a.getName()), a.getTokenLocation(), - a.getTokenName())) - .collect(Collectors.toMap(AuthorizerModel::getName, a -> a)); - } -} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java index 0f2bd2ea6627..3dbb1f02aa8c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java @@ -20,7 +20,6 @@ import software.amazon.awssdk.codegen.model.intermediate.Metadata; import software.amazon.awssdk.codegen.model.intermediate.Protocol; import software.amazon.awssdk.codegen.model.service.AuthType; -import software.amazon.awssdk.codegen.model.service.Operation; import software.amazon.awssdk.codegen.model.service.ServiceMetadata; import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.naming.DefaultNamingStrategy; @@ -38,21 +37,11 @@ private AddMetadata() { public static Metadata constructMetadata(ServiceModel serviceModel, CustomizationConfig customizationConfig) { - Metadata metadata = new Metadata(); NamingStrategy namingStrategy = new DefaultNamingStrategy(serviceModel, customizationConfig); ServiceMetadata serviceMetadata = serviceModel.getMetadata(); - - String serviceName; - String rootPackageName; - - if (serviceMetadata.getProtocol().equals(Protocol.API_GATEWAY.getValue())) { - throw new UnsupportedOperationException("Java SDK V2 doesn't support api-gateway protocol yet"); - } else { - serviceName = namingStrategy.getServiceName(); - rootPackageName = AWS_PACKAGE_PREFIX; - } + String serviceName = namingStrategy.getServiceName(); metadata.withApiVersion(serviceMetadata.getApiVersion()) .withAsyncClient(String.format(Constant.ASYNC_CLIENT_CLASS_NAME_PATTERN, serviceName)) @@ -62,7 +51,7 @@ public static Metadata constructMetadata(ServiceModel serviceModel, .withBaseBuilderInterface(String.format(Constant.BASE_BUILDER_INTERFACE_NAME_PATTERN, serviceName)) .withBaseBuilder(String.format(Constant.BASE_BUILDER_CLASS_NAME_PATTERN, serviceName)) .withDocumentation(serviceModel.getDocumentation()) - .withRootPackageName(rootPackageName) + .withRootPackageName(AWS_PACKAGE_PREFIX) .withClientPackageName(namingStrategy.getClientPackageName(serviceName)) .withModelPackageName(namingStrategy.getModelPackageName(serviceName)) 
.withTransformPackageName(namingStrategy.getTransformPackageName(serviceName)) @@ -84,13 +73,10 @@ public static Metadata constructMetadata(ServiceModel serviceModel, .withEndpointPrefix(serviceMetadata.getEndpointPrefix()) .withSigningName(serviceMetadata.getSigningName()) .withAuthType(AuthType.fromValue(serviceMetadata.getSignatureVersion())) - .withRequiresApiKey(requiresApiKey(serviceModel)) .withUid(serviceMetadata.getUid()) .withServiceId(serviceMetadata.getServiceId()) - .withSupportsH2(supportsH2(serviceMetadata)); - - String jsonVersion = getJsonVersion(metadata, serviceMetadata); - metadata.setJsonVersion(jsonVersion); + .withSupportsH2(supportsH2(serviceMetadata)) + .withJsonVersion(getJsonVersion(metadata, serviceMetadata)); return metadata; } @@ -107,14 +93,4 @@ private static String getJsonVersion(Metadata metadata, ServiceMetadata serviceM return serviceMetadata.getJsonVersion(); } } - - /** - * If any operation requires an API key we generate a setter on the builder. - * - * @return True if any operation requires an API key. False otherwise. - */ - private static boolean requiresApiKey(ServiceModel serviceModel) { - return serviceModel.getOperations().values().stream() - .anyMatch(Operation::requiresApiKey); - } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddModelShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddModelShapes.java index 92664189df4b..8a2492c3d003 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddModelShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddModelShapes.java @@ -53,7 +53,7 @@ private Map constructModelShapes(Set shapesToSkip) { ShapeType shapeType = getModelShapeType(shape); if (shapeType != null) { - String javaClassName = getNamingStrategy().getJavaClassName(shapeName); + String javaClassName = getNamingStrategy().getShapeClassName(shapeName); if (shapesToSkip.contains(javaClassName)) { continue; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java index e6a2c3cac520..db3068630790 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java @@ -162,6 +162,7 @@ public Map constructOperations() { operationModel.setEndpointOperation(op.isEndpointoperation()); operationModel.setEndpointDiscovery(op.getEndpointdiscovery()); operationModel.setEndpointTrait(op.getEndpoint()); + operationModel.setHttpChecksumRequired(op.isHttpChecksumRequired()); Input input = op.getInput(); if (input != null) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java index 678198a22f2f..f5da83363eac 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java @@ -159,7 +159,6 @@ private MemberModel generateMemberModel(String c2jMemberName, Member c2jMemberDe variableType + " type."); } - MemberModel memberModel = new MemberModel(); memberModel.withC2jName(c2jMemberName) @@ -443,7 +442,7 @@ private void fillContainerTypeMemberMetadata(Map c2jShapes, mapValueModel)); } else if (memberC2jShape.getEnumValues() != null) { // enum values - memberModel.withEnumType(getNamingStrategy().getJavaClassName(memberC2jShapeName)); + 
memberModel.withEnumType(getNamingStrategy().getShapeClassName(memberC2jShapeName)); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java index 41317040a59f..f8b12acbf705 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java @@ -26,7 +26,6 @@ import software.amazon.awssdk.codegen.internal.Jackson; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; -import software.amazon.awssdk.codegen.model.intermediate.Protocol; public class CodeGenerator { @@ -125,14 +124,8 @@ private void emitCode(IntermediateModel intermediateModel) { } private GeneratorTask createGeneratorTasks(IntermediateModel intermediateModel) { - // For clients built internally, the output directory and source directory are the same. - GeneratorTaskParams params = GeneratorTaskParams.create(intermediateModel, sourcesDirectory, testsDirectory); + return new AwsGeneratorTasks(GeneratorTaskParams.create(intermediateModel, sourcesDirectory, testsDirectory)); - if (params.getModel().getMetadata().getProtocol() == Protocol.API_GATEWAY) { - throw new UnsupportedOperationException("Unsupported protocol: " + Protocol.API_GATEWAY); - } else { - return new AwsGeneratorTasks(params); - } } /** diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java index 3b13b00b63a7..e722b421fe92 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java @@ -32,11 +32,9 @@ import software.amazon.awssdk.codegen.internal.TypeUtils; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; -import software.amazon.awssdk.codegen.model.intermediate.AuthorizerModel; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.MemberModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; -import software.amazon.awssdk.codegen.model.intermediate.Protocol; import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; import software.amazon.awssdk.codegen.model.service.AuthType; import software.amazon.awssdk.codegen.model.service.Operation; @@ -96,8 +94,6 @@ public IntermediateModel build() { Map shapes = new HashMap<>(); Map operations = new TreeMap<>(new AddOperations(this).constructOperations()); - Map authorizers = - new HashMap<>(new AddCustomAuthorizers(this.service, getNamingStrategy()).constructAuthorizers()); // Iterate through every operation and build an 'endpointOperation' if at least one operation that supports // endpoint discovery is found. 
If -any operations that require- endpoint discovery are found, then the flag @@ -133,7 +129,7 @@ public IntermediateModel build() { IntermediateModel fullModel = new IntermediateModel( constructMetadata(service, customConfig), operations, shapes, - customConfig, endpointOperation, authorizers, paginators.getPagination(), namingStrategy, + customConfig, endpointOperation, paginators.getPagination(), namingStrategy, waiters.getWaiters()); customization.postprocess(fullModel); @@ -151,7 +147,6 @@ public IntermediateModel build() { trimmedShapes, fullModel.getCustomizationConfig(), endpointOperation, - fullModel.getCustomAuthorizers(), fullModel.getPaginators(), namingStrategy, fullModel.getWaiters()); @@ -162,6 +157,8 @@ public IntermediateModel build() { setSimpleMethods(trimmedModel); + namingStrategy.validateCustomerVisibleNaming(trimmedModel); + return trimmedModel; } @@ -210,33 +207,15 @@ private void linkCustomAuthorizationToRequestShapes(IntermediateModel model) { operation.getOperationName())); } - if (model.getMetadata().getProtocol() == Protocol.API_GATEWAY) { - linkAuthorizationToRequestShapeForApiGatewayProtocol(model, c2jOperation, shape); - } else { - linkAuthorizationToRequestShapeForAwsProtocol(c2jOperation.getAuthtype(), shape); - } + linkAuthorizationToRequestShapeForAwsProtocol(c2jOperation.getAuthtype(), shape); }); } - private void linkAuthorizationToRequestShapeForApiGatewayProtocol(IntermediateModel model, - Operation c2jOperation, - ShapeModel shape) { - if (AuthType.CUSTOM.equals(c2jOperation.getAuthtype())) { - AuthorizerModel auth = model.getCustomAuthorizers().get(c2jOperation.getAuthorizer()); - if (auth == null) { - throw new RuntimeException(String.format("Required custom auth not defined: %s", - c2jOperation.getAuthorizer())); - } - shape.setRequestSignerClassFqcn(model.getMetadata().getAuthPolicyPackageName() + '.' 
+ - auth.getInterfaceName()); - } else if (AuthType.IAM.equals(c2jOperation.getAuthtype())) { - model.getMetadata().setRequiresIamSigners(true); - // TODO IamRequestSigner does not exist - shape.setRequestSignerClassFqcn("software.amazon.awssdk.opensdk.protect.auth.IamRequestSigner"); + private void linkAuthorizationToRequestShapeForAwsProtocol(AuthType authType, ShapeModel shape) { + if (authType == null) { + return; } - } - private void linkAuthorizationToRequestShapeForAwsProtocol(AuthType authType, ShapeModel shape) { switch (authType) { case V4: shape.setRequestSignerClassFqcn("software.amazon.awssdk.auth.signer.Aws4Signer"); @@ -245,8 +224,6 @@ private void linkAuthorizationToRequestShapeForAwsProtocol(AuthType authType, Sh shape.setRequestSignerClassFqcn("software.amazon.awssdk.auth.signer.Aws4UnsignedPayloadSigner"); break; case NONE: - case IAM: - // just ignore this, this is the default value but only applicable to APIG generated clients break; default: throw new IllegalArgumentException("Unsupported authtype for AWS Request: " + authType); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/TypeUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/TypeUtils.java index 1d6b554bd99e..ccbb8a39338f 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/TypeUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/TypeUtils.java @@ -133,7 +133,7 @@ public String getJavaDataType(Map shapes, String shapeName, String shapeType = shape.getType(); if (Structure.getName().equals(shapeType)) { - return namingStrategy.getJavaClassName(shapeName); + return namingStrategy.getShapeClassName(shapeName); } else if (List.getName().equals(shapeType)) { String listContainerType = DATA_TYPE_MAPPINGS.get(TypeKey.LIST_INTERFACE); return listContainerType + "<" + diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index 7a90070f5548..c9fd9f339967 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -119,10 +119,6 @@ public class CustomizationConfig { private Map modelMarshallerDefaultValueSupplier = new HashMap<>(); - private boolean useAutoConstructList = true; - - private boolean useAutoConstructMap = true; - /** * Custom Retry Policy */ @@ -169,6 +165,7 @@ public class CustomizationConfig { * Arnable fields used in s3 control */ private Map s3ArnableFields; + /** * Allow a customer to set an endpoint override AND bypass endpoint discovery on their client even when endpoint discovery * enabled is true and endpoint discovery is required for an operation. This customization should almost never be "true" @@ -185,6 +182,11 @@ public class CustomizationConfig { */ private Map> useLegacyEventGenerationScheme = new HashMap<>(); + /** + * How the code generator should behave when it encounters shapes with underscores in the name. 
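+ * <p>Left unset, code generation fails when an underscore appears in a customer-visible name; setting this to {@link UnderscoresInNameBehavior#ALLOW} keeps the underscores, at the cost of non-idiomatic Java names.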
+ */ + private UnderscoresInNameBehavior underscoresInNameBehavior; + private CustomizationConfig() { } @@ -361,22 +363,6 @@ public void setModelMarshallerDefaultValueSupplier(Map modelMars this.modelMarshallerDefaultValueSupplier = modelMarshallerDefaultValueSupplier; } - public boolean isUseAutoConstructList() { - return useAutoConstructList; - } - - public void setUseAutoConstructList(boolean useAutoConstructList) { - this.useAutoConstructList = useAutoConstructList; - } - - public boolean isUseAutoConstructMap() { - return useAutoConstructMap; - } - - public void setUseAutoConstructMap(boolean useAutoConstructMap) { - this.useAutoConstructMap = useAutoConstructMap; - } - public String getCustomRetryPolicy() { return customRetryPolicy; } @@ -471,4 +457,17 @@ public Map> getUseLegacyEventGenerationScheme() { public void setUseLegacyEventGenerationScheme(Map> useLegacyEventGenerationScheme) { this.useLegacyEventGenerationScheme = useLegacyEventGenerationScheme; } + + public UnderscoresInNameBehavior getUnderscoresInNameBehavior() { + return underscoresInNameBehavior; + } + + public void setUnderscoresInNameBehavior(UnderscoresInNameBehavior behavior) { + this.underscoresInNameBehavior = behavior; + } + + public CustomizationConfig withUnderscoresInShapeNameBehavior(UnderscoresInNameBehavior behavior) { + this.underscoresInNameBehavior = behavior; + return this; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/UnderscoresInNameBehavior.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/UnderscoresInNameBehavior.java new file mode 100644 index 000000000000..428ae41babae --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/UnderscoresInNameBehavior.java @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.config.customization; + +/** + * Valid values for the {@link CustomizationConfig#setUnderscoresInNameBehavior} customization. + */ +public enum UnderscoresInNameBehavior { + /** + * Allow the underscores in names, and generating shapes with names that are non-idiomatic to the language. 
+ */ + ALLOW +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java index 29f335017e7c..4948ac406dae 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java @@ -20,6 +20,7 @@ import java.io.InputStream; import java.io.UncheckedIOException; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -43,8 +44,6 @@ public final class IntermediateModel { private CustomizationConfig customizationConfig; - private Map customAuthorizers; - private Optional endpointOperation; private Map paginators; @@ -59,9 +58,11 @@ public final class IntermediateModel { } public IntermediateModel() { + this.operations = new HashMap<>(); + this.shapes = new HashMap<>(); this.endpointOperation = Optional.empty(); - this.paginators = Collections.emptyMap(); - this.waiters = Collections.emptyMap(); + this.paginators = new HashMap<>(); + this.waiters = new HashMap<>(); this.namingStrategy = null; } @@ -70,7 +71,7 @@ public IntermediateModel(Metadata metadata, Map shapes, CustomizationConfig customizationConfig) { this(metadata, operations, shapes, customizationConfig, null, - Collections.emptyMap(), Collections.emptyMap(), null, Collections.emptyMap()); + Collections.emptyMap(), null, Collections.emptyMap()); } public IntermediateModel( @@ -79,7 +80,6 @@ public IntermediateModel( Map shapes, CustomizationConfig customizationConfig, OperationModel endpointOperation, - Map customAuthorizers, Map paginators, NamingStrategy namingStrategy, Map waiters) { @@ -88,7 +88,6 @@ public IntermediateModel( this.shapes = shapes; this.customizationConfig = customizationConfig; this.endpointOperation = Optional.ofNullable(endpointOperation); - this.customAuthorizers = customAuthorizers; this.paginators = paginators; this.namingStrategy = namingStrategy; this.waiters = waiters; @@ -215,13 +214,9 @@ private static String loadDefaultFileHeader() { } public String getSdkBaseResponseFqcn() { - if (metadata.getProtocol() == Protocol.API_GATEWAY) { - return "software.amazon.awssdk.opensdk.BaseResult"; - } else { - return String.format("%s<%s>", - AwsResponse.class.getName(), - getResponseMetadataClassName()); - } + return String.format("%s<%s>", + AwsResponse.class.getName(), + getResponseMetadataClassName()); } private String getResponseMetadataClassName() { @@ -235,14 +230,6 @@ public List simpleMethodsRequiringTesting() { .collect(Collectors.toList()); } - public Map getCustomAuthorizers() { - return customAuthorizers; - } - - public void setCustomAuthorizers(Map customAuthorizers) { - this.customAuthorizers = customAuthorizers; - } - public Optional getEndpointOperation() { return endpointOperation; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java index f4ccd26fbd5d..a82127f17509 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java @@ -220,22 +220,6 @@ public MemberModel withBeanStyleSetterMethodName(String beanStyleSetterName) { return this; } - // TODO: Remove when all 
marshallers switch over to new style - public String getSetterMethodName() { - return getBeanStyleSetterMethodName(); - } - - // TODO: Remove when all marshallers switch over to new style - public void setSetterMethodName(String setterMethodName) { - setBeanStyleGetterMethodName(setterMethodName); - } - - // TODO: Remove when all marshallers switch over to new style - public MemberModel withSetterMethodName(String setterMethodName) { - setSetterMethodName(setterMethodName); - return this; - } - public String getFluentSetterMethodName() { return fluentSetterMethodName; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java index 447e3ac012aa..eb3a83d9fb20 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Metadata.java @@ -511,7 +511,6 @@ public boolean isJsonProtocol() { return protocol == Protocol.CBOR || protocol == Protocol.ION || protocol == Protocol.AWS_JSON || - protocol == Protocol.API_GATEWAY || protocol == Protocol.REST_JSON; } @@ -531,7 +530,6 @@ public boolean isQueryProtocol() { */ public static boolean isNotRestProtocol(String protocol) { switch (Protocol.fromValue(protocol)) { - case API_GATEWAY: case REST_JSON: case REST_XML: return false; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java index aa1b55f8a906..eb0532056712 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java @@ -62,6 +62,8 @@ public class OperationModel extends DocumentationModel { private EndpointTrait endpointTrait; + private boolean httpChecksumRequired; + public String getOperationName() { return operationName; } @@ -272,4 +274,12 @@ private boolean containsEventStream(ShapeModel shapeModel) { .filter(m -> m.getShape() != null) .anyMatch(m -> m.getShape().isEventStream()); } + + public boolean isHttpChecksumRequired() { + return httpChecksumRequired; + } + + public void setHttpChecksumRequired(boolean httpChecksumRequired) { + this.httpChecksumRequired = httpChecksumRequired; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java index 9dff6efd810b..c24898551033 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/Protocol.java @@ -25,7 +25,6 @@ public enum Protocol { CBOR("cbor"), QUERY("query"), REST_XML("rest-xml"), - API_GATEWAY("api-gateway"), ION("ion"); private String protocol; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java index fe1aefdd5863..d60eb076d2fb 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java @@ -36,15 +36,15 @@ public class Operation { private List errors; - private boolean requiresApiKey; - private EndpointDiscovery 
endpointdiscovery; private boolean endpointoperation; private EndpointTrait endpoint; - private AuthType authtype = AuthType.IAM; + private AuthType authtype; + + private boolean httpChecksumRequired; public String getName() { return name; @@ -133,14 +133,6 @@ public void setAuthorizer(String authorizer) { this.authorizer = authorizer; } - public boolean requiresApiKey() { - return requiresApiKey; - } - - public void setRequiresApiKey(boolean requiresApiKey) { - this.requiresApiKey = requiresApiKey; - } - public EndpointDiscovery getEndpointdiscovery() { return endpointdiscovery; } @@ -164,4 +156,12 @@ public EndpointTrait getEndpoint() { public void setEndpoint(EndpointTrait endpoint) { this.endpoint = endpoint; } + + public boolean isHttpChecksumRequired() { + return httpChecksumRequired; + } + + public void setHttpChecksumRequired(boolean httpChecksumRequired) { + this.httpChecksumRequired = httpChecksumRequired; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java index c615d66fe57c..70971f8ad5a2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Shape.java @@ -20,7 +20,6 @@ import java.util.Map; public class Shape { - private String type; private Map members = Collections.emptyMap(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java index b6bed715e3e7..0a996a7fbdf2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java @@ -16,7 +16,6 @@ package software.amazon.awssdk.codegen.naming; import static java.util.stream.Collectors.joining; -import static software.amazon.awssdk.codegen.internal.Constant.AUTHORIZER_NAME_PREFIX; import static software.amazon.awssdk.codegen.internal.Constant.CONFLICTING_NAME_SUFFIX; import static software.amazon.awssdk.codegen.internal.Constant.EXCEPTION_CLASS_SUFFIX; import static software.amazon.awssdk.codegen.internal.Constant.FAULT_CLASS_SUFFIX; @@ -33,22 +32,30 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.regex.Pattern; import java.util.stream.Stream; import software.amazon.awssdk.codegen.internal.Constant; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.config.customization.UnderscoresInNameBehavior; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.MemberModel; +import software.amazon.awssdk.codegen.model.intermediate.Metadata; import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.StringUtils; +import software.amazon.awssdk.utils.Validate; /** * Default implementation of naming strategy respecting. 
*/ public class DefaultNamingStrategy implements NamingStrategy { - private static Logger log = Logger.loggerFor(DefaultNamingStrategy.class); + private static final Logger log = Logger.loggerFor(DefaultNamingStrategy.class); + + private static final Pattern VALID_IDENTIFIER_NAME = + Pattern.compile("\\p{javaJavaIdentifierStart}\\p{javaJavaIdentifierPart}*"); private static final String COLLISION_DISAMBIGUATION_PREFIX = "Default"; @@ -97,7 +104,7 @@ public class DefaultNamingStrategy implements NamingStrategy { public DefaultNamingStrategy(ServiceModel serviceModel, CustomizationConfig customizationConfig) { this.serviceModel = serviceModel; - this.customizationConfig = customizationConfig; + this.customizationConfig = customizationConfig == null ? CustomizationConfig.create() : customizationConfig; } private static boolean isJavaKeyword(String word) { @@ -179,7 +186,6 @@ public String getSmokeTestPackageName(String serviceName) { * if provided or service name with the shared service name. */ private String concatServiceNameIfShareModel(String serviceName) { - if (customizationConfig.getShareModelConfig() != null) { return customizationConfig.getShareModelConfig().getShareModelWith() + "." + Optional.ofNullable(customizationConfig.getShareModelConfig().getPackageName()).orElse(serviceName); @@ -269,20 +275,8 @@ public String getEnumValueName(String enumValue) { } @Override - public String getJavaClassName(String shapeName) { - return Arrays.stream(shapeName.split("[._-]|\\W")) - .filter(s -> !StringUtils.isEmpty(s)) - .map(Utils::capitalize) - .collect(joining()); - } - - @Override - public String getAuthorizerClassName(String shapeName) { - String converted = getJavaClassName(shapeName); - if (converted.length() > 0 && !Character.isLetter(converted.charAt(0))) { - return AUTHORIZER_NAME_PREFIX + converted; - } - return converted; + public String getShapeClassName(String shapeName) { + return Utils.capitalize(shapeName); } @Override @@ -384,4 +378,63 @@ private boolean isDisallowedNameForShape(String name, Shape parentShape) { return RESERVED_STRUCTURE_METHOD_NAMES.contains(name); } } + + @Override + public void validateCustomerVisibleNaming(IntermediateModel trimmedModel) { + Metadata metadata = trimmedModel.getMetadata(); + validateCustomerVisibleName(metadata.getSyncInterface(), "metadata-derived interface name"); + validateCustomerVisibleName(metadata.getSyncBuilderInterface(), "metadata-derived builder interface name"); + validateCustomerVisibleName(metadata.getAsyncInterface(), "metadata-derived async interface name"); + validateCustomerVisibleName(metadata.getAsyncBuilderInterface(), "metadata-derived async builder interface name"); + validateCustomerVisibleName(metadata.getBaseBuilderInterface(), "metadata-derived builder interface name"); + validateCustomerVisibleName(metadata.getBaseExceptionName(), "metadata-derived exception name"); + validateCustomerVisibleName(metadata.getBaseRequestName(), "metadata-derived request name"); + validateCustomerVisibleName(metadata.getBaseResponseName(), "metadata-derived response name"); + + trimmedModel.getOperations().values().forEach(operation -> { + validateCustomerVisibleName(operation.getOperationName(), "operations"); + }); + + trimmedModel.getWaiters().forEach((name, waiter) -> { + validateCustomerVisibleName(name, "waiters"); + }); + + trimmedModel.getShapes().values().forEach(shape -> { + String shapeName = shape.getShapeName(); + validateCustomerVisibleName(shapeName, "shapes"); + shape.getMembers().forEach(member -> { + 
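// Check every customer-visible method name generated for this member: fluent getters/setters, enum accessors, the existence-check method, bean-style accessors, and the enum type name. +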
validateCustomerVisibleName(member.getFluentGetterMethodName(), shapeName + " shape"); + validateCustomerVisibleName(member.getFluentSetterMethodName(), shapeName + " shape"); + validateCustomerVisibleName(member.getFluentEnumGetterMethodName(), shapeName + " shape"); + validateCustomerVisibleName(member.getFluentEnumSetterMethodName(), shapeName + " shape"); + validateCustomerVisibleName(member.getExistenceCheckMethodName(), shapeName + " shape"); + validateCustomerVisibleName(member.getBeanStyleGetterMethodName(), shapeName + " shape"); + validateCustomerVisibleName(member.getBeanStyleSetterMethodName(), shapeName + " shape"); + validateCustomerVisibleName(member.getEnumType(), shapeName + " shape"); + }); + }); + } + + private void validateCustomerVisibleName(String name, String location) { + if (name == null) { + return; + } + + if (name.contains("_")) { + UnderscoresInNameBehavior behavior = customizationConfig.getUnderscoresInNameBehavior(); + + String supportedBehaviors = Arrays.toString(UnderscoresInNameBehavior.values()); + Validate.notNull(behavior, + "Encountered a name or identifier that the customer will see (%s in the %s) with an underscore. " + + "This isn't idiomatic in Java. Please either remove the underscores or apply the " + + "'underscoresInNameBehavior' customization for this service (Supported " + + "'underscoresInNameBehavior' values: %s).", name, location, supportedBehaviors); + Validate.isTrue(behavior == UnderscoresInNameBehavior.ALLOW, + "Unsupported underscoresInShapeNameBehavior: %s. Supported values: %s", behavior, supportedBehaviors); + } + + Validate.isTrue(VALID_IDENTIFIER_NAME.matcher(name).matches(), + "Encountered a name or identifier that is invalid within Java (%s in %s). Please remove invalid " + + "characters.", name, location); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java index 2af31ff106b3..d59a34df29b6 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.codegen.naming; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.MemberModel; import software.amazon.awssdk.codegen.model.service.Shape; @@ -97,13 +98,7 @@ public interface NamingStrategy { * @param shapeName Name of structure used to derive Java class name. * @return Appropriate name to use for a Java class for an arbitrary (not a request, response, error) structure. */ - String getJavaClassName(String shapeName); - - /** - * @param shapeName Name of an authorizer shape used to derive the authorizer name - * @return Appropriate name to use for a Java class for an Authorizer - */ - String getAuthorizerClassName(String shapeName); + String getShapeClassName(String shapeName); /** * @param memberName Member name to name getter for. @@ -160,4 +155,9 @@ public interface NamingStrategy { * @return Name of an existence check method. */ String getExistenceCheckMethodName(String memberName, Shape parentShape); + + /** + * Verify the customer-visible naming in the provided intermediate model will compile and is idiomatic to Java. 
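+ * <p>For example, a shape or member named {@code My_Value} (a hypothetical name) only passes this check when the service applies the {@code underscoresInNameBehavior} customization with a value of {@code ALLOW}; a name that is not a valid Java identifier always fails.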
+ */ + void validateCustomerVisibleNaming(IntermediateModel trimmedModel); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java index ef46146eb90d..50a9a32cc366 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java @@ -53,7 +53,6 @@ import software.amazon.awssdk.codegen.model.config.customization.UtilitiesMethod; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; -import software.amazon.awssdk.codegen.model.intermediate.Protocol; import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; import software.amazon.awssdk.codegen.model.service.AuthType; import software.amazon.awssdk.codegen.poet.PoetExtensions; @@ -243,7 +242,7 @@ protected MethodSpec.Builder operationBody(MethodSpec.Builder builder, Operation builder.addStatement("apiCallMetricCollector.reportMetric($T.$L, $S)", CoreMetric.class, "OPERATION_NAME", opModel.getOperationName()); - if (model.getMetadata().getProtocol() != Protocol.API_GATEWAY && shouldUseAsyncWithBodySigner(opModel)) { + if (shouldUseAsyncWithBodySigner(opModel)) { builder.addCode(applyAsyncWithBodyV4SignerOverride(opModel)); } else { builder.addCode(ClientClassUtils.callApplySignerOverrideMethod(opModel)); @@ -445,7 +444,7 @@ private boolean shouldUseAsyncWithBodySigner(OperationModel opModel) { AuthType authTypeForOperation = opModel.getAuthType(); - if (authTypeForOperation == AuthType.IAM) { + if (authTypeForOperation == null) { authTypeForOperation = model.getMetadata().getAuthType(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java index eb9c392a8855..95724e518af9 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java @@ -335,8 +335,6 @@ static ProtocolSpec getProtocolSpecs(PoetExtensions poetExtensions, Intermediate case CBOR: case ION: return new JsonProtocolSpec(poetExtensions, model); - case API_GATEWAY: - throw new UnsupportedOperationException("Not yet supported."); default: throw new RuntimeException("Unknown protocol: " + protocol.name()); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/Ec2ProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/Ec2ProtocolSpec.java index e595f6739719..19ab56530848 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/Ec2ProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/Ec2ProtocolSpec.java @@ -29,54 +29,4 @@ public Ec2ProtocolSpec(IntermediateModel model, PoetExtensions poetExtensions) { protected Class protocolFactoryClass() { return AwsEc2ProtocolFactory.class; } - - /* - TODO Dry run support - private MethodSpec dryRunMethod() { - TypeVariableName typeVariableName = TypeVariableName.get("X", AmazonWebServiceRequest.class); - ClassName dryRunResult = poetExtensions.getModelClass("DryRunResult"); - TypeName dryRunResultGeneric = ParameterizedTypeName.get(dryRunResult, typeVariableName); - ClassName dryRunRequest = 
poetExtensions.getModelClass("DryRunSupportedRequest"); - TypeName dryRunRequestGeneric = ParameterizedTypeName.get(dryRunRequest, typeVariableName); - return MethodSpec.methodBuilder("dryRun") - .returns(dryRunResultGeneric) - .addModifiers(Modifier.PUBLIC) - .addParameter(dryRunRequestGeneric, "request") - .addTypeVariable(typeVariableName) - .addStatement("$T dryRunRequest = request.getDryRunRequest()", - Request.class) - .beginControlFlow("try") - .addStatement("$T<$T> responseHandler = new $T<$T>(new $T())", - StaxResponseHandler.class, - dryRunResult, - StaxResponseHandler.class, - dryRunResult, - VoidStaxUnmarshaller.class) - .addStatement("\nclientHandler.execute(new $T<$T, $T>().marshaller($L).withResponseHandler($N)" + - ".withInput($L))", - ClientExecutionParams.class, - Request.class, - dryRunResult, - "null", - "responseHandler", - "dryRunRequest") - .addStatement("throw new $T($S)", SdkClientException.class, - "Unrecognized service response for the dry-run request.") - .endControlFlow() - .beginControlFlow("catch (AwsServiceException exception)") - .beginControlFlow("if (exception.errorCode().equals($S) && exception.statusCode() == 412)", - "DryRunOperation") - .addStatement("return new $T(true, request, exception.getMessage(), exception)", dryRunResultGeneric) - .endControlFlow() - .beginControlFlow("else if (exception.errorCode().equals($S) && exception.statusCode() == 403)", - "UnauthorizedOperation") - .addStatement("return new $T(false, request, exception.getMessage(), exception)", dryRunResultGeneric) - .endControlFlow() - .addStatement("throw new $T($S, exception)", SdkClientException.class, - "Unrecognized service response for the dry-run request.") - .endControlFlow() - .build(); - - } - */ } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index f4438a674eb5..679876843a0c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.codegen.model.intermediate.Protocol; import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; import software.amazon.awssdk.codegen.poet.PoetExtensions; +import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; import software.amazon.awssdk.core.SdkPojoBuilder; import software.amazon.awssdk.core.SdkResponse; @@ -79,21 +80,18 @@ public MethodSpec initProtocolFactory(IntermediateModel model) { TypeVariableName.get("T")); TypeVariableName typeVariableName = TypeVariableName.get("T", upperBound); - MethodSpec.Builder methodSpec = MethodSpec.methodBuilder("init") - .addTypeVariable(typeVariableName) - .addParameter(typeVariableName, "builder") - .returns(typeVariableName) - .addModifiers(Modifier.PRIVATE) - .addCode( - "return builder\n" + - ".clientConfiguration(clientConfiguration)\n" + - ".defaultServiceExceptionSupplier($T::builder)\n" + - ".protocol($T.$L)\n" + - ".protocolVersion($S)\n" - + "$L", - baseException, AwsJsonProtocol.class, - protocolEnumName(metadata.getProtocol()), metadata.getJsonVersion(), - customErrorCodeFieldName()); + MethodSpec.Builder methodSpec = + MethodSpec.methodBuilder("init") + .addTypeVariable(typeVariableName) + .addParameter(typeVariableName, "builder") + 
.returns(typeVariableName) + .addModifiers(Modifier.PRIVATE) + .addCode("return builder\n") + .addCode(".clientConfiguration(clientConfiguration)\n") + .addCode(".defaultServiceExceptionSupplier($T::builder)\n", baseException) + .addCode(".protocol($T.$L)\n", AwsJsonProtocol.class, protocolEnumName(metadata.getProtocol())) + .addCode(".protocolVersion($S)\n", metadata.getJsonVersion()) + .addCode("$L", customErrorCodeFieldName()); if (metadata.getContentType() != null) { methodSpec.addCode(".withContentTypeOverride($S)", metadata.getContentType()); @@ -126,12 +124,13 @@ public CodeBlock responseHandler(IntermediateModel model, OperationModel opModel TypeName pojoResponseType = getPojoResponseType(opModel, poetExtensions); String protocolFactory = protocolFactoryLiteral(model, opModel); - CodeBlock.Builder builder = CodeBlock.builder(); - builder.add("$T operationMetadata = $T.builder()\n" - + ".hasStreamingSuccessResponse($L)\n" - + ".isPayloadJson($L)\n" - + ".build();", JsonOperationMetadata.class, JsonOperationMetadata.class, - opModel.hasStreamingOutput(), !opModel.getHasBlobMemberAsPayload()); + CodeBlock.Builder builder = + CodeBlock.builder() + .add("$T operationMetadata = $T.builder()\n", JsonOperationMetadata.class, JsonOperationMetadata.class) + .add(".hasStreamingSuccessResponse($L)\n", opModel.hasStreamingOutput()) + .add(".isPayloadJson($L)\n", !opModel.getHasBlobMemberAsPayload()) + .add(".build();"); + if (opModel.hasEventStreamOutput()) { responseHandlersForEventStreaming(opModel, pojoResponseType, protocolFactory, builder); } else { @@ -161,24 +160,18 @@ public CodeBlock executionHandler(OperationModel opModel) { ClassName requestType = poetExtensions.getModelClass(opModel.getInput().getVariableType()); ClassName marshaller = poetExtensions.getRequestTransformClass(opModel.getInputShape().getShapeName() + "Marshaller"); - CodeBlock.Builder codeBlock = CodeBlock - .builder() - .add("\n\nreturn clientHandler.execute(new $T<$T, $T>()\n" + - ".withOperationName(\"$N\")\n" + - ".withResponseHandler($N)\n" + - ".withErrorResponseHandler($N)\n" + - hostPrefixExpression(opModel) + - discoveredEndpoint(opModel) + - ".withInput($L)\n", - ClientExecutionParams.class, - requestType, - responseType, - opModel.getOperationName(), - "responseHandler", - "errorResponseHandler", - opModel.getInput().getVariableName()); - - codeBlock.add(".withMetricCollector($N)", "apiCallMetricCollector"); + CodeBlock.Builder codeBlock = + CodeBlock.builder() + .add("\n\nreturn clientHandler.execute(new $T<$T, $T>()\n", + ClientExecutionParams.class, requestType, responseType) + .add(".withOperationName(\"$N\")\n", opModel.getOperationName()) + .add(".withResponseHandler(responseHandler)\n") + .add(".withErrorResponseHandler(errorResponseHandler)\n") + .add(hostPrefixExpression(opModel)) + .add(discoveredEndpoint(opModel)) + .add(".withInput($L)\n", opModel.getInput().getVariableName()) + .add(".withMetricCollector(apiCallMetricCollector)") + .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)); if (opModel.hasStreamingInput()) { codeBlock.add(".withRequestBody(requestBody)") @@ -206,22 +199,19 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper ClassName eventStreamBaseClass = poetExtensions.getModelClassFromShape(shapeModel); ParameterizedTypeName transformerType = ParameterizedTypeName.get( ClassName.get(EventStreamAsyncResponseTransformer.class), pojoResponseType, eventStreamBaseClass); - builder.addStatement("$1T<$2T> future = new $1T<>()", - 
ClassName.get(CompletableFuture.class), - ClassName.get(Void.class)); - builder.add("$T asyncResponseTransformer = $T.<$T, $T>builder()\n" + - " .eventStreamResponseHandler(asyncResponseHandler)\n" - + " .eventResponseHandler(eventResponseHandler)\n" - + " .initialResponseHandler(responseHandler)\n" - + " .exceptionResponseHandler(errorResponseHandler)\n" - + " .future(future)\n" - + " .executor(executor)\n" - + " .serviceName(serviceName())\n" - + " .build();", - transformerType, - ClassName.get(EventStreamAsyncResponseTransformer.class), - pojoResponseType, - eventStreamBaseClass); + + builder.add("$1T<$2T> future = new $1T<>();", ClassName.get(CompletableFuture.class), ClassName.get(Void.class)) + .add("$T asyncResponseTransformer = $T.<$T, $T>builder()\n", + transformerType, ClassName.get(EventStreamAsyncResponseTransformer.class), pojoResponseType, + eventStreamBaseClass) + .add(".eventStreamResponseHandler(asyncResponseHandler)\n") + .add(".eventResponseHandler(eventResponseHandler)\n") + .add(".initialResponseHandler(responseHandler)\n") + .add(".exceptionResponseHandler(errorResponseHandler)\n") + .add(".future(future)\n") + .add(".executor(executor)\n") + .add(".serviceName(serviceName())\n") + .add(".build();"); if (isRestJson) { builder.add(restAsyncResponseTransformer(pojoResponseType, eventStreamBaseClass)); @@ -235,36 +225,23 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper : pojoResponseType; TypeName executeFutureValueType = executeFutureValueType(opModel, poetExtensions); - builder.add("\n\n$T<$T> executeFuture = clientHandler.execute(new $T<$T, $T>()\n" + - ".withOperationName(\"$N\")\n" + - ".withMarshaller($L)\n" + - "$L" + - "$L" + - ".withResponseHandler($L)\n" + - ".withErrorResponseHandler(errorResponseHandler)\n" + - ".withMetricCollector(apiCallMetricCollector)\n" + - hostPrefixExpression(opModel) + - discoveredEndpoint(opModel) + - asyncRequestBody + - ".withInput($L)$L);", - CompletableFuture.class, - executeFutureValueType, - ClientExecutionParams.class, - requestType, - responseType, - opModel.getOperationName(), - asyncMarshaller(model, opModel, marshaller, protocolFactory), - opModel.hasEventStreamInput() ? CodeBlock.builder() - .add(".withAsyncRequestBody($T.fromPublisher(adapted))", - AsyncRequestBody.class) - .build() - .toString() - : "", - opModel.hasEventStreamInput() && opModel.hasEventStreamOutput() ? CodeBlock - .builder().add(".withFullDuplex(true)").build() : "", - opModel.hasEventStreamOutput() && !isRestJson ? 
"voidResponseHandler" : "responseHandler", - opModel.getInput().getVariableName(), - asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)); + builder.add("\n\n$T<$T> executeFuture = clientHandler.execute(new $T<$T, $T>()\n", + CompletableFuture.class, executeFutureValueType, ClientExecutionParams.class, requestType, responseType) + .add(".withOperationName(\"$N\")\n", opModel.getOperationName()) + .add(".withMarshaller($L)\n", asyncMarshaller(model, opModel, marshaller, protocolFactory)) + .add(asyncRequestBody(opModel)) + .add(fullDuplex(opModel)) + .add(".withResponseHandler($L)\n", responseHandlerName(opModel, isRestJson)) + .add(".withErrorResponseHandler(errorResponseHandler)\n") + .add(".withMetricCollector(apiCallMetricCollector)\n") + .add(hostPrefixExpression(opModel)) + .add(discoveredEndpoint(opModel)) + .add(asyncRequestBody) + .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) + .add(".withInput($L)$L);", + opModel.getInput().getVariableName(), asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)); + + String whenComplete = whenCompleteBody(opModel, customerResponseHandler); if (!whenComplete.isEmpty()) { String whenCompletedFutureName = "whenCompleted"; @@ -283,6 +260,22 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper return builder.build(); } + private String responseHandlerName(OperationModel opModel, boolean isRestJson) { + return opModel.hasEventStreamOutput() && !isRestJson ? "voidResponseHandler" + : "responseHandler"; + } + + private CodeBlock fullDuplex(OperationModel opModel) { + return opModel.hasEventStreamInput() && opModel.hasEventStreamOutput() ? CodeBlock.of(".withFullDuplex(true)") + : CodeBlock.of(""); + } + + private CodeBlock asyncRequestBody(OperationModel opModel) { + return opModel.hasEventStreamInput() ? 
CodeBlock.of(".withAsyncRequestBody($T.fromPublisher(adapted))", + AsyncRequestBody.class) + : CodeBlock.of(""); + } + private String asyncResponseTransformerVariable(boolean isStreaming, boolean isRestJson, OperationModel opModel) { if (isStreaming) { if (opModel.hasEventStreamOutput() && isRestJson) { @@ -306,14 +299,12 @@ private CodeBlock restAsyncResponseTransformer(TypeName pojoResponseType, ClassN ParameterizedTypeName restTransformerType = ParameterizedTypeName.get( ClassName.get(RestEventStreamAsyncResponseTransformer.class), pojoResponseType, eventStreamBaseClass); return CodeBlock.builder() - .add("$T restAsyncResponseTransformer = $T.<$T, $T>builder()\n" - + ".eventStreamAsyncResponseTransformer(asyncResponseTransformer)\n" - + ".eventStreamResponseHandler(asyncResponseHandler)\n" - + ".build();", - restTransformerType, - ClassName.get(RestEventStreamAsyncResponseTransformer.class), - pojoResponseType, + .add("$T restAsyncResponseTransformer = $T.<$T, $T>builder()\n", + restTransformerType, ClassName.get(RestEventStreamAsyncResponseTransformer.class), pojoResponseType, eventStreamBaseClass) + .add(".eventStreamAsyncResponseTransformer(asyncResponseTransformer)\n") + .add(".eventStreamResponseHandler(asyncResponseHandler)\n") + .add(".build();") .build(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index 1c6327a0e063..2ea4cfc385a8 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.poet.PoetExtensions; +import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; @@ -99,24 +100,18 @@ public CodeBlock executionHandler(OperationModel opModel) { TypeName responseType = poetExtensions.getModelClass(opModel.getReturnType().getReturnType()); ClassName requestType = poetExtensions.getModelClass(opModel.getInput().getVariableType()); ClassName marshaller = poetExtensions.getTransformClass(opModel.getInputShape().getShapeName() + "Marshaller"); - CodeBlock.Builder codeBlock = CodeBlock - .builder() - .add("\n\nreturn clientHandler.execute(new $T<$T, $T>()" + - ".withOperationName(\"$N\")\n" + - ".withResponseHandler($N)" + - ".withErrorResponseHandler($N)" + - hostPrefixExpression(opModel) + - discoveredEndpoint(opModel) + - ".withInput($L)", - ClientExecutionParams.class, - requestType, - responseType, - opModel.getOperationName(), - "responseHandler", - "errorResponseHandler", - opModel.getInput().getVariableName()); - - codeBlock.add(".withMetricCollector($N)", "apiCallMetricCollector"); + CodeBlock.Builder codeBlock = + CodeBlock.builder() + .add("\n\nreturn clientHandler.execute(new $T<$T, $T>()", + ClientExecutionParams.class, requestType, responseType) + .add(".withOperationName($S)\n", opModel.getOperationName()) + .add(".withResponseHandler(responseHandler)\n") + .add(".withErrorResponseHandler(errorResponseHandler)\n") + 
.add(hostPrefixExpression(opModel)) + .add(discoveredEndpoint(opModel)) + .add(".withInput($L)", opModel.getInput().getVariableName()) + .add(".withMetricCollector(apiCallMetricCollector)") + .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)); if (opModel.hasStreamingInput()) { return codeBlock.add(".withRequestBody(requestBody)") @@ -136,27 +131,23 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper String asyncRequestBody = opModel.hasStreamingInput() ? ".withAsyncRequestBody(requestBody)" : ""; TypeName executeFutureValueType = executeFutureValueType(opModel, poetExtensions); - CodeBlock.Builder builder = CodeBlock.builder().add("\n\n$T<$T> executeFuture = clientHandler.execute(new $T<$T, $T>()" - + "\n" + - ".withOperationName(\"$N\")\n" + - ".withMarshaller($L)" + - ".withResponseHandler(responseHandler)" + - ".withErrorResponseHandler($N)\n" + - ".withMetricCollector(apiCallMetricCollector)\n" + - hostPrefixExpression(opModel) + - asyncRequestBody + - ".withInput($L) $L);", - CompletableFuture.class, - executeFutureValueType, - ClientExecutionParams.class, - requestType, - pojoResponseType, - opModel.getOperationName(), - asyncMarshaller(intermediateModel, opModel, marshaller, - "protocolFactory"), - "errorResponseHandler", - opModel.getInput().getVariableName(), - opModel.hasStreamingOutput() ? ", asyncResponseTransformer" : ""); + CodeBlock.Builder builder = + CodeBlock.builder() + .add("\n\n$T<$T> executeFuture = clientHandler.execute(new $T<$T, $T>()\n", + CompletableFuture.class, executeFutureValueType, ClientExecutionParams.class, + requestType, pojoResponseType) + .add(".withOperationName(\"$N\")\n", opModel.getOperationName()) + .add(".withMarshaller($L)\n", + asyncMarshaller(intermediateModel, opModel, marshaller, "protocolFactory")) + .add(".withResponseHandler(responseHandler)\n") + .add(".withErrorResponseHandler(errorResponseHandler)\n") + .add(".withMetricCollector(apiCallMetricCollector)\n") + .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)); + + builder.add(hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L)$L);", + opModel.getInput().getVariableName(), + opModel.hasStreamingOutput() ? 
", asyncResponseTransformer" : ""); + builder.addStatement("$T requestOverrideConfig = $L.overrideConfiguration().orElse(null)", AwsRequestOverrideConfiguration.class, opModel.getInput().getVariableName()); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java index 58341ff1ddc7..df92aca61ce0 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java @@ -29,6 +29,8 @@ import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.poet.PoetExtensions; +import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; +import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; @@ -71,13 +73,9 @@ public CodeBlock responseHandler(IntermediateModel model, ParameterizedTypeName.get(ClassName.get(software.amazon.awssdk.core.Response.class), responseType)); return CodeBlock.builder() - .addStatement("\n\n$T responseHandler = protocolFactory.createCombinedResponseHandler" - + "($T::builder," + .addStatement("\n\n$T responseHandler = protocolFactory.createCombinedResponseHandler($T::builder, " + "new $T().withHasStreamingSuccessResponse($L))", - handlerType, - responseType, - XmlOperationMetadata.class, - opModel.hasStreamingOutput()) + handlerType, responseType, XmlOperationMetadata.class, opModel.hasStreamingOutput()) .build(); } @@ -85,12 +83,9 @@ private CodeBlock streamingResponseHandler(OperationModel opModel) { ClassName responseType = poetExtensions.getModelClass(opModel.getReturnType().getReturnType()); return CodeBlock.builder() - .addStatement("\n\n$T<$T> responseHandler = protocolFactory.createResponseHandler($T::builder," + .addStatement("\n\n$T<$T> responseHandler = protocolFactory.createResponseHandler($T::builder, " + "new $T().withHasStreamingSuccessResponse($L))", - HttpResponseHandler.class, - responseType, - responseType, - XmlOperationMetadata.class, + HttpResponseHandler.class, responseType, responseType, XmlOperationMetadata.class, opModel.hasStreamingOutput()) .build(); } @@ -113,23 +108,16 @@ public CodeBlock executionHandler(OperationModel opModel) { TypeName responseType = poetExtensions.getModelClass(opModel.getReturnType().getReturnType()); ClassName requestType = poetExtensions.getModelClass(opModel.getInput().getVariableType()); ClassName marshaller = poetExtensions.getTransformClass(opModel.getInputShape().getShapeName() + "Marshaller"); - CodeBlock.Builder codeBlock = CodeBlock - .builder() - .add("\n\nreturn clientHandler.execute(new $T<$T, $T>()" + - ".withOperationName(\"$N\")\n" + - ".withCombinedResponseHandler($N)" + - ".withMetricCollector(apiCallMetricCollector)\n" + - hostPrefixExpression(opModel) + - discoveredEndpoint(opModel) + - ".withInput($L)", - software.amazon.awssdk.core.client.handler.ClientExecutionParams.class, - requestType, - responseType, - opModel.getOperationName(), - "responseHandler", - opModel.getInput().getVariableName()); - - codeBlock.add(".withMetricCollector($N)", "apiCallMetricCollector"); + 
CodeBlock.Builder codeBlock = CodeBlock.builder() + .add("\n\nreturn clientHandler.execute(new $T<$T, $T>()\n", + ClientExecutionParams.class, requestType, responseType) + .add(".withOperationName($S)\n", opModel.getOperationName()) + .add(".withCombinedResponseHandler(responseHandler)\n") + .add(".withMetricCollector(apiCallMetricCollector)\n" + + hostPrefixExpression(opModel) + + discoveredEndpoint(opModel)) + .add(".withInput($L)", opModel.getInput().getVariableName()) + .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)); s3ArnableFields(opModel, model).ifPresent(codeBlock::add); @@ -173,25 +161,20 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper ClassName requestType = poetExtensions.getModelClass(opModel.getInput().getVariableType()); ClassName marshaller = poetExtensions.getRequestTransformClass(opModel.getInputShape().getShapeName() + "Marshaller"); - String asyncRequestBody = opModel.hasStreamingInput() ? ".withAsyncRequestBody(requestBody)" - : ""; TypeName executeFutureValueType = executeFutureValueType(opModel, poetExtensions); CodeBlock.Builder builder = - CodeBlock.builder().add("\n\n$T<$T> executeFuture = clientHandler.execute(new $T<$T, $T>()" - + "\n" + - ".withOperationName(\"$N\")\n" + - ".withMarshaller($L)" + - ".withCombinedResponseHandler($N)" + - hostPrefixExpression(opModel) + - asyncRequestBody, - java.util.concurrent.CompletableFuture.class, - executeFutureValueType, - software.amazon.awssdk.core.client.handler.ClientExecutionParams.class, - requestType, - pojoResponseType, - opModel.getOperationName(), - asyncMarshaller(intermediateModel, opModel, marshaller, "protocolFactory"), - "responseHandler"); + CodeBlock.builder() + .add("\n\n$T<$T> executeFuture = clientHandler.execute(new $T<$T, $T>()\n", + CompletableFuture.class, executeFutureValueType, + ClientExecutionParams.class, requestType, pojoResponseType) + .add(".withOperationName(\"$N\")\n", opModel.getOperationName()) + .add(".withMarshaller($L)\n", asyncMarshaller(intermediateModel, opModel, marshaller, "protocolFactory")) + .add(".withCombinedResponseHandler(responseHandler)\n") + .add(hostPrefixExpression(opModel)) + .add(".withMetricCollector(apiCallMetricCollector)\n") + .add(asyncRequestBody(opModel)) + .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)); + s3ArnableFields(opModel, model).ifPresent(builder::add); builder.add(".withInput($L) $L);", opModel.getInput().getVariableName(), opModel.hasStreamingOutput() ? ", asyncResponseTransformer" : ""); @@ -212,6 +195,10 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper return builder.build(); } + private String asyncRequestBody(OperationModel opModel) { + return opModel.hasStreamingInput() ? ".withAsyncRequestBody(requestBody)" : ""; + } + private CodeBlock asyncStreamingExecutionHandler(IntermediateModel intermediateModel, OperationModel opModel) { return super.asyncExecutionHandler(intermediateModel, opModel); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/HttpChecksumRequiredTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/HttpChecksumRequiredTrait.java new file mode 100644 index 000000000000..69abf57d5f93 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/HttpChecksumRequiredTrait.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client.traits; + +import com.squareup.javapoet.CodeBlock; +import software.amazon.awssdk.codegen.model.intermediate.OperationModel; +import software.amazon.awssdk.core.client.handler.ClientExecutionParams; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; + +/** + * The logic for handling the "httpChecksumRequired" trait within the code generator. + */ +public class HttpChecksumRequiredTrait { + private HttpChecksumRequiredTrait() { + } + + /** + * Generate a ".putExecutionAttribute(...)" code-block for the provided operation model. This should be used within the + * context of initializing {@link ClientExecutionParams}. If HTTP checksums are not required by the operation, this will + * return an empty code-block. + */ + public static CodeBlock putHttpChecksumAttribute(OperationModel operationModel) { + if (operationModel.isHttpChecksumRequired()) { + return CodeBlock.of(".putExecutionAttribute($T.HTTP_CHECKSUM_REQUIRED, $T.create())\n", + SdkInternalExecutionAttribute.class, HttpChecksumRequired.class); + } + + return CodeBlock.of(""); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/BeanGetterHelper.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/BeanGetterHelper.java index c9a2db4777fd..d54740b12452 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/BeanGetterHelper.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/BeanGetterHelper.java @@ -63,20 +63,20 @@ public MethodSpec beanStyleGetter(MemberModel memberModel) { private MethodSpec byteBufferGetter(MemberModel memberModel) { return basicGetter(memberModel, ClassName.get(ByteBuffer.class), - CodeBlock.of("return $1N == null ? null : $1N.asByteBuffer()", + CodeBlock.of("return $1N == null ? null : $1N.asByteBuffer();", memberModel.getVariable().getVariableName())); } private MethodSpec listByteBufferGetter(MemberModel memberModel) { return basicGetter(memberModel, ParameterizedTypeName.get(List.class, ByteBuffer.class), - CodeBlock.of("return $1N == null ? null : $1N.stream().map($2T::asByteBuffer).collect($3T.toList())", + CodeBlock.of("return $1N == null ? null : $1N.stream().map($2T::asByteBuffer).collect($3T.toList());", memberModel.getVariable().getVariableName(), SdkBytes.class, Collectors.class)); } private MethodSpec mapByteBufferGetter(MemberModel memberModel) { String body = "return $1N == null ? 
null : " + - "$1N.entrySet().stream().collect($2T.toMap(e -> e.getKey(), e -> e.getValue().asByteBuffer()))"; + "$1N.entrySet().stream().collect($2T.toMap(e -> e.getKey(), e -> e.getValue().asByteBuffer()));"; String keyType = memberModel.getMapModel().getKeyModel().getVariable().getVariableType(); return basicGetter(memberModel, PoetUtils.createParameterizedTypeName(Map.class, keyType, ByteBuffer.class.getSimpleName()), @@ -86,15 +86,14 @@ private MethodSpec mapByteBufferGetter(MemberModel memberModel) { private MethodSpec regularGetter(MemberModel memberModel) { return basicGetter(memberModel, typeProvider.parameterType(memberModel), - CodeBlock.of("return $N", memberModel.getVariable().getVariableName())); + CodeBlock.of("return $N;", memberModel.getVariable().getVariableName())); } private MethodSpec builderGetter(MemberModel memberModel) { return basicGetter(memberModel, poetExtensions.getModelClass(memberModel.getC2jShape()).nestedClass("Builder"), - CodeBlock.builder().add("return $1N != null ? $1N.toBuilder() : null", - memberModel.getVariable().getVariableName()) - .build()); + CodeBlock.of("return $1N != null ? $1N.toBuilder() : null;", + memberModel.getVariable().getVariableName())); } private MethodSpec mapOfBuildersGetter(MemberModel memberModel) { @@ -105,11 +104,10 @@ private MethodSpec mapOfBuildersGetter(MemberModel memberModel) { return basicGetter(memberModel, returnType, - CodeBlock.builder().add("return $1N != null ? $2T.mapValues($1N, $3T::toBuilder) : null", + CodeBlock.of("return $1N != null ? $2T.mapValues($1N, $3T::toBuilder) : null;", memberModel.getVariable().getVariableName(), CollectionUtils.class, - valueType) - .build()); + valueType)); } private MethodSpec listOfBuildersGetter(MemberModel memberModel) { @@ -118,19 +116,27 @@ private MethodSpec listOfBuildersGetter(MemberModel memberModel) { return basicGetter(memberModel, returnType, - CodeBlock.builder().add( - "return $1N != null ? $1N.stream().map($2T::toBuilder).collect($3T.toList()) : null", - memberModel.getVariable().getVariableName(), - memberType, - Collectors.class) - .build()); + CodeBlock.of("return $1N != null ? 
$1N.stream().map($2T::toBuilder).collect($3T.toList()) : null;", + memberModel.getVariable().getVariableName(), + memberType, + Collectors.class)); } - private MethodSpec basicGetter(MemberModel memberModel, TypeName returnType, CodeBlock statement) { + private MethodSpec basicGetter(MemberModel memberModel, TypeName returnType, CodeBlock body) { + CodeBlock.Builder getterBody = CodeBlock.builder(); + + memberModel.getAutoConstructClassIfExists().ifPresent(autoConstructClass -> { + getterBody.add("if ($N instanceof $T) {", memberModel.getVariable().getVariableName(), autoConstructClass) + .add("return null;") + .add("}"); + }); + + getterBody.add(body); + return MethodSpec.methodBuilder(memberModel.getBeanStyleGetterMethodName()) .addModifiers(Modifier.PUBLIC, Modifier.FINAL) .returns(returnType) - .addStatement(statement) + .addCode(getterBody.build()) .build(); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/MemberCopierSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/MemberCopierSpec.java index ab95b64346dd..4825001efe12 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/MemberCopierSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/MemberCopierSpec.java @@ -209,8 +209,9 @@ private MethodSpec builderCopyMethodForMap() { CodeBlock code = CodeBlock.builder() - .beginControlFlow("if ($N == null)", memberParamName()) - .addStatement("return null") + .beginControlFlow("if ($1N == null || $1N instanceof $2T)", + memberParamName(), DefaultSdkAutoConstructMap.class) + .addStatement("return $T.getInstance()", DefaultSdkAutoConstructMap.class) .endControlFlow() .addStatement("return $N($N.entrySet().stream().collect(toMap($T::getKey, e -> e.getValue().build())))", serviceModelCopiers.copyMethodName(), @@ -234,8 +235,9 @@ private MethodSpec builderCopyMethodForList() { ParameterizedTypeName.get(ClassName.get(Collection.class), WildcardTypeName.subtypeOf(builderForParameter)); CodeBlock code = CodeBlock.builder() - .beginControlFlow("if ($N == null)", memberParamName()) - .addStatement("return null") + .beginControlFlow("if ($1N == null || $1N instanceof $2T)", + memberParamName(), DefaultSdkAutoConstructList.class) + .addStatement("return $T.getInstance()", DefaultSdkAutoConstructList.class) .endControlFlow() .addStatement("return $N($N.stream().map($T::$N).collect(toList()))", serviceModelCopiers.copyMethodName(), @@ -270,16 +272,9 @@ private CodeBlock listCopyBody(EnumTransform enumTransform) { CodeBlock.Builder builder = CodeBlock.builder(); - if (typeProvider.useAutoConstructLists()) { - builder.beginControlFlow("if ($1N == null || $1N instanceof $2T)", memberParamName(), SdkAutoConstructList.class) - .addStatement("return $T.getInstance()", DefaultSdkAutoConstructList.class) - .endControlFlow(); - - } else { - builder.beginControlFlow("if ($N == null)", memberParamName()) - .addStatement("return null") - .endControlFlow(); - } + builder.beginControlFlow("if ($1N == null || $1N instanceof $2T)", memberParamName(), SdkAutoConstructList.class) + .addStatement("return $T.getInstance()", DefaultSdkAutoConstructList.class) + .endControlFlow(); Optional copierClass = serviceModelCopiers.copierClassFor(memberModel.getListModel().getListMemberModel()); boolean hasCopier = copierClass.isPresent(); @@ -367,15 +362,9 @@ private CodeBlock mapCopyBody(EnumTransform enumTransform) { CodeBlock valueCopyExpr = mapKeyValCopyExpr(valueModel, "getValue", valueTransform); CodeBlock.Builder builder = 
CodeBlock.builder(); - if (typeProvider.useAutoConstructMaps()) { - builder.beginControlFlow("if ($1N == null || $1N instanceof $2T)", memberParamName(), SdkAutoConstructMap.class) - .addStatement("return $T.getInstance()", DefaultSdkAutoConstructMap.class) - .endControlFlow(); - } else { - builder.beginControlFlow("if ($1N == null)", memberParamName()) - .addStatement("return null") - .endControlFlow(); - } + builder.beginControlFlow("if ($1N == null || $1N instanceof $2T)", memberParamName(), SdkAutoConstructMap.class) + .addStatement("return $T.getInstance()", DefaultSdkAutoConstructMap.class) + .endControlFlow(); TypeName copyType; if (enumTransform == EnumTransform.STRING_TO_ENUM) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java index a4385f6e945e..3d5d7ef992c9 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java @@ -169,11 +169,11 @@ private List fields() { List fields = shapeModel.getNonStreamingMembers().stream() .map(m -> { FieldSpec fieldSpec = typeProvider.asField(m, Modifier.PRIVATE); - if (m.isList() && typeProvider.useAutoConstructLists()) { + if (m.isList()) { fieldSpec = fieldSpec.toBuilder() .initializer("$T.getInstance()", DefaultSdkAutoConstructList.class) .build(); - } else if (m.isMap() && typeProvider.useAutoConstructMaps()) { + } else if (m.isMap()) { fieldSpec = fieldSpec.toBuilder() .initializer("$T.getInstance()", DefaultSdkAutoConstructMap.class) .build(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelMethodOverrides.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelMethodOverrides.java index 64b802db329b..dcba831d24b3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelMethodOverrides.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelMethodOverrides.java @@ -70,7 +70,15 @@ public MethodSpec equalsBySdkFieldsMethod(ShapeModel shapeModel) { memberEqualsStmt.add("return "); memberEqualsStmt.add(memberModels.stream().map(m -> { String getterName = m.getFluentGetterMethodName(); - return CodeBlock.builder().add("$T.equals($N(), other.$N())", Objects.class, getterName, getterName).build(); + + CodeBlock.Builder result = CodeBlock.builder(); + if (m.getAutoConstructClassIfExists().isPresent()) { + String existenceCheckMethodName = m.getExistenceCheckMethodName(); + result.add("$1N() == other.$1N() && ", existenceCheckMethodName); + } + + return result.add("$T.equals($N(), other.$N())", Objects.class, getterName, getterName) + .build(); }).collect(PoetCollectors.toDelimitedCodeBlock("&&"))); memberEqualsStmt.add(";"); } @@ -119,13 +127,19 @@ public MethodSpec toStringMethod(ShapeModel shapeModel) { } public CodeBlock toStringValue(MemberModel member) { - if (!member.isSensitive()) { - return CodeBlock.of("$L()", member.getFluentGetterMethodName()); + if (member.isSensitive()) { + return CodeBlock.of("$L() == null ? null : $S", + member.getFluentGetterMethodName(), + "*** Sensitive Data Redacted ***"); + } + + if (member.getAutoConstructClassIfExists().isPresent()) { + return CodeBlock.of("$N() ? $N() : null", + member.getExistenceCheckMethodName(), + member.getFluentGetterMethodName()); } - return CodeBlock.of("$L() == null ? 
null : $S", - member.getFluentGetterMethodName(), - "*** Sensitive Data Redacted ***"); + return CodeBlock.of("$L()", member.getFluentGetterMethodName()); } public MethodSpec hashCodeMethod(ShapeModel shapeModel) { @@ -141,13 +155,22 @@ public MethodSpec hashCodeMethod(ShapeModel shapeModel) { } shapeModel.getNonStreamingMembers() - .forEach(m -> methodBuilder.addStatement( - "hashCode = 31 * hashCode + $T.hashCode($N())", - Objects.class, - m.getFluentGetterMethodName())); + .forEach(m -> methodBuilder.addCode("hashCode = 31 * hashCode + $T.hashCode(", Objects.class) + .addCode(hashCodeValue(m)) + .addCode(");\n")); methodBuilder.addStatement("return hashCode"); return methodBuilder.build(); } + + public CodeBlock hashCodeValue(MemberModel member) { + if (member.getAutoConstructClassIfExists().isPresent()) { + return CodeBlock.of("$N() ? $N() : null", + member.getExistenceCheckMethodName(), + member.getFluentGetterMethodName()); + } + + return CodeBlock.of("$N()", member.getFluentGetterMethodName()); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/TypeProvider.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/TypeProvider.java index a759463165a6..f315bc74d15f 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/TypeProvider.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/TypeProvider.java @@ -53,14 +53,6 @@ public ClassName listImplClassName() { return ClassName.get(ArrayList.class); } - public boolean useAutoConstructLists() { - return intermediateModel.getCustomizationConfig().isUseAutoConstructList(); - } - - public boolean useAutoConstructMaps() { - return intermediateModel.getCustomizationConfig().isUseAutoConstructMap(); - } - public TypeName enumReturnType(MemberModel memberModel) { return fieldType(memberModel, true); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java index ddf7f221f386..3024048cd15c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/MarshallerSpec.java @@ -128,8 +128,6 @@ private MarshallerProtocolSpec getProtocolSpecs(software.amazon.awssdk.codegen.m case REST_XML: return new XmlMarshallerSpec(intermediateModel, shapeModel); - case API_GATEWAY: - throw new UnsupportedOperationException("Not yet supported."); default: throw new RuntimeException("Unknown protocol: " + protocol.name()); } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/customization/processors/UseLegacyEventGenerationSchemeProcessorTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/customization/processors/UseLegacyEventGenerationSchemeProcessorTest.java index c853141d1fef..ff041882bbc7 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/customization/processors/UseLegacyEventGenerationSchemeProcessorTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/customization/processors/UseLegacyEventGenerationSchemeProcessorTest.java @@ -68,15 +68,11 @@ public void testPostProcess_customizationIsValid_succeeds() { private static IntermediateModel intermediateModelWithConfig(String configName) { - IntermediateModel intermediateModel = new IntermediateModelBuilder(C2jModels.builder() + return new IntermediateModelBuilder(C2jModels.builder() .serviceModel(serviceModel) - 
.customizationConfig(CustomizationConfig.create()) + .customizationConfig(loadCustomizationConfig(configName)) .build()) .build(); - - intermediateModel.setCustomizationConfig(loadCustomizationConfig(configName)); - - return intermediateModel; } private static CustomizationConfig loadCustomizationConfig(String configName) { diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java index f30c642cbeb6..d3da6d10a28e 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java @@ -15,7 +15,9 @@ package software.amazon.awssdk.codegen.naming; +import static java.util.Collections.singletonList; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; @@ -24,15 +26,20 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Map; - +import java.util.function.Consumer; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.config.customization.UnderscoresInNameBehavior; import software.amazon.awssdk.codegen.model.config.customization.ShareModelConfig; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.MemberModel; +import software.amazon.awssdk.codegen.model.intermediate.Metadata; +import software.amazon.awssdk.codegen.model.intermediate.OperationModel; +import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; import software.amazon.awssdk.codegen.model.service.Member; import software.amazon.awssdk.codegen.model.service.ServiceMetadata; import software.amazon.awssdk.codegen.model.service.ServiceModel; @@ -40,6 +47,7 @@ @RunWith(MockitoJUnitRunner.class) public class DefaultNamingStrategyTest { + private CustomizationConfig customizationConfig = CustomizationConfig.create(); private ServiceModel serviceModel = mock(ServiceModel.class); @@ -61,23 +69,10 @@ public class DefaultNamingStrategyTest { @Mock private ServiceMetadata serviceMetadata; - private DefaultNamingStrategy strat = new DefaultNamingStrategy(serviceModel, null); + private DefaultNamingStrategy strat = new DefaultNamingStrategy(serviceModel, customizationConfig); @Before public void setUp() { - - } - - @Test - public void canConvertStringsWithNonAlphasToClassNames() { - String anInvalidClassName = "a phrase-With_other.delimiters"; - assertThat(strat.getJavaClassName(anInvalidClassName)).isEqualTo("APhraseWithOtherDelimiters"); - } - - @Test - public void canConvertAuthorizerStartingWithNumber() { - String anInvalidClassName = "35-authorizer-implementation"; - assertThat(strat.getAuthorizerClassName(anInvalidClassName)).isEqualTo("I35AuthorizerImplementation"); } @Test @@ -323,51 +318,64 @@ private void validateConversion(String input, String expectedOutput) { } @Test - public void getJavaClassName_ReturnsSanitizedName_ClassStartingWithUnderscore() { - NamingStrategy strategy = new DefaultNamingStrategy(null, null); - String javaClassName = 
strategy.getJavaClassName("_MyClass"); - assertThat(javaClassName).isEqualTo("MyClass"); + public void validateDisallowsUnderscoresWithCustomization() { + String invalidName = "foo_bar"; + verifyFailure(i -> i.getMetadata().setAsyncBuilderInterface(invalidName)); + verifyFailure(i -> i.getMetadata().setSyncBuilderInterface(invalidName)); + verifyFailure(i -> i.getMetadata().setAsyncInterface(invalidName)); + verifyFailure(i -> i.getMetadata().setSyncInterface(invalidName)); + verifyFailure(i -> i.getMetadata().setBaseBuilderInterface(invalidName)); + verifyFailure(i -> i.getMetadata().setBaseExceptionName(invalidName)); + verifyFailure(i -> i.getOperations().put(invalidName, opModel(o -> o.setOperationName(invalidName)))); + verifyFailure(i -> i.getWaiters().put(invalidName, null)); + verifyFailure(i -> i.getShapes().put(invalidName, shapeModel(s -> s.setShapeName(invalidName)))); + verifyFailure(i -> i.getShapes().put(invalidName, shapeWithMember(m -> m.setBeanStyleGetterMethodName(invalidName)))); + verifyFailure(i -> i.getShapes().put(invalidName, shapeWithMember(m -> m.setBeanStyleSetterMethodName(invalidName)))); + verifyFailure(i -> i.getShapes().put(invalidName, shapeWithMember(m -> m.setFluentEnumGetterMethodName(invalidName)))); + verifyFailure(i -> i.getShapes().put(invalidName, shapeWithMember(m -> m.setFluentEnumSetterMethodName(invalidName)))); + verifyFailure(i -> i.getShapes().put(invalidName, shapeWithMember(m -> m.setFluentGetterMethodName(invalidName)))); + verifyFailure(i -> i.getShapes().put(invalidName, shapeWithMember(m -> m.setFluentSetterMethodName(invalidName)))); + verifyFailure(i -> i.getShapes().put(invalidName, shapeWithMember(m -> m.setEnumType(invalidName)))); } @Test - public void getJavaClassName_ReturnsSanitizedName_ClassStartingWithDoubleUnderscore() { - NamingStrategy strategy = new DefaultNamingStrategy(null, null); - String javaClassName = strategy.getJavaClassName("__MyClass"); - assertThat(javaClassName).isEqualTo("MyClass"); - } + public void validateAllowsUnderscoresWithCustomization() { + CustomizationConfig customization = + CustomizationConfig.create() + .withUnderscoresInShapeNameBehavior(UnderscoresInNameBehavior.ALLOW); + NamingStrategy strategy = new DefaultNamingStrategy(serviceModel, customization); - @Test - public void getJavaClassName_ReturnsSanitizedName_ClassStartingWithDoublePeriods() { - NamingStrategy strategy = new DefaultNamingStrategy(null, null); - String javaClassName = strategy.getJavaClassName("..MyClass"); - assertThat(javaClassName).isEqualTo("MyClass"); + Metadata metadata = new Metadata(); + metadata.setAsyncBuilderInterface("foo_bar"); + + IntermediateModel model = new IntermediateModel(); + model.setMetadata(metadata); + + strategy.validateCustomerVisibleNaming(model); } - @Test - public void getJavaClassName_ReturnsSanitizedName_ClassStartingWithDoubleDashes() { - NamingStrategy strategy = new DefaultNamingStrategy(null, null); - String javaClassName = strategy.getJavaClassName("--MyClass"); - assertThat(javaClassName).isEqualTo("MyClass"); + private void verifyFailure(Consumer modelModifier) { + IntermediateModel model = new IntermediateModel(); + model.setMetadata(new Metadata()); + modelModifier.accept(model); + assertThatThrownBy(() -> strat.validateCustomerVisibleNaming(model)).isInstanceOf(RuntimeException.class); } - @Test - public void getJavaClassName_ReturnsSanitizedName_DoubleUnderscoresInClass() { - NamingStrategy strategy = new DefaultNamingStrategy(null, null); - String javaClassName = 
strategy.getJavaClassName("My__Class"); - assertThat(javaClassName).isEqualTo("MyClass"); + private OperationModel opModel(Consumer operationModifier) { + OperationModel model = new OperationModel(); + operationModifier.accept(model); + return model; } - @Test - public void getJavaClassName_ReturnsSanitizedName_DoublePeriodsInClass() { - NamingStrategy strategy = new DefaultNamingStrategy(null, null); - String javaClassName = strategy.getJavaClassName("My..Class"); - assertThat(javaClassName).isEqualTo("MyClass"); + private ShapeModel shapeModel(Consumer shapeModifier) { + ShapeModel model = new ShapeModel(); + shapeModifier.accept(model); + return model; } - @Test - public void getJavaClassName_ReturnsSanitizedName_DoubleDashesInClass() { - NamingStrategy strategy = new DefaultNamingStrategy(null, null); - String javaClassName = strategy.getJavaClassName("My--Class"); - assertThat(javaClassName).isEqualTo("MyClass"); + private ShapeModel shapeWithMember(Consumer memberModifier) { + MemberModel model = new MemberModel(); + memberModifier.accept(model); + return shapeModel(s -> s.setMembers(singletonList(model))); } } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java index 25c34a37d3d4..6bc0740757ff 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java @@ -59,6 +59,20 @@ public static IntermediateModel queryServiceModels() { return new IntermediateModelBuilder(models).build(); } + public static IntermediateModel xmlServiceModels() { + File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/xml/service-2.json").getFile()); + File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/xml/customization.config").getFile()); + + + C2jModels models = C2jModels + .builder() + .serviceModel(getServiceModel(serviceModel)) + .customizationConfig(getCustomizationConfig(customizationModel)) + .build(); + + return new IntermediateModelBuilder(models).build(); + } + public static IntermediateModel endpointDiscoveryModels() { File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/endpointdiscovery/service-2.json").getFile()); File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/endpointdiscovery/customization.config").getFile()); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java index bddf732ff708..7a0fd95ff76a 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/PoetClientFunctionalTests.java @@ -65,6 +65,19 @@ public void asyncClientClassQuery() throws Exception { assertThat(syncClientClass, generatesTo("test-query-async-client-class.java")); } + @Test + public void syncClientClassXml() throws Exception { + SyncClientClass syncClientClass = createSyncClientClass(ClientTestModels.xmlServiceModels()); + assertThat(syncClientClass, generatesTo("test-xml-client-class.java")); + } + + + @Test + public void asyncClientClassXml() throws Exception { + AsyncClientClass syncClientClass = createAsyncClientClass(ClientTestModels.xmlServiceModels()); + assertThat(syncClientClass, 
generatesTo("test-xml-async-client-class.java")); + } + private SyncClientClass createSyncClientClass(IntermediateModel model) { return new SyncClientClass(GeneratorTaskParams.create(model, "sources/", "tests/")); } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/eventstream/EventModelSpecTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/eventstream/EventModelSpecTest.java index bb018a062350..8a30f15bfede 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/eventstream/EventModelSpecTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/eventstream/EventModelSpecTest.java @@ -14,6 +14,7 @@ import org.junit.runners.Parameterized; import software.amazon.awssdk.codegen.C2jModels; import software.amazon.awssdk.codegen.IntermediateModelBuilder; +import software.amazon.awssdk.codegen.customization.processors.UseLegacyEventGenerationSchemeProcessorTest; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.MemberModel; @@ -58,14 +59,20 @@ private String referenceFileForShape() { } private static void setUp() { - File serviceModelFile = new File(AwsModelSpecTest.class.getResource("service-2.json").getFile()); + File serviceModelFile = new File(EventModelSpecTest.class.getResource("service-2.json").getFile()); ServiceModel serviceModel = ModelLoaderUtils.loadModel(ServiceModel.class, serviceModelFile); intermediateModel = new IntermediateModelBuilder( C2jModels.builder() .serviceModel(serviceModel) - .customizationConfig(CustomizationConfig.create()) + .customizationConfig(loadCustomizationConfig("customization.config")) .build()) .build(); } + + private static CustomizationConfig loadCustomizationConfig(String configName) { + String c2jFilePath = EventModelSpecTest.class.getResource(configName).getFile(); + File file = new File(c2jFilePath); + return ModelLoaderUtils.loadModel(CustomizationConfig.class, file); + } } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/AwsModelSpecWithoutAutoConstructContainersTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/AwsModelSpecWithoutAutoConstructContainersTest.java deleted file mode 100644 index 834b9fc6ceb2..000000000000 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/AwsModelSpecWithoutAutoConstructContainersTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.codegen.poet.model; - -import static java.util.stream.Collectors.toList; -import static org.hamcrest.MatcherAssert.assertThat; -import static software.amazon.awssdk.codegen.poet.PoetMatchers.generatesTo; -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; - -import java.io.File; -import java.io.IOException; -import java.util.Collection; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import software.amazon.awssdk.codegen.C2jModels; -import software.amazon.awssdk.codegen.IntermediateModelBuilder; -import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; -import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; -import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; -import software.amazon.awssdk.codegen.model.service.ServiceModel; -import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; - -/** - * Similar to {@link AwsModelSpecTest} but tests correct generation when auto construct containers are disabled. - */ -@RunWith(Parameterized.class) -public class AwsModelSpecWithoutAutoConstructContainersTest { - private static IntermediateModel intermediateModel; - - private final ShapeModel shapeModel; - - @Parameterized.Parameters(name = "{0}") - public static Collection data() { - invokeSafely(AwsModelSpecWithoutAutoConstructContainersTest::setUp); - return intermediateModel.getShapes().values().stream().map(shape -> new Object[] { shape }).collect(toList()); - } - - public AwsModelSpecWithoutAutoConstructContainersTest(ShapeModel shapeModel) { - this.shapeModel = shapeModel; - } - - @Test - public void generationWithAutoConstructList() { - assertThat(new AwsServiceModel(intermediateModel, shapeModel), generatesTo(referenceFileForShape())); - } - - private String referenceFileForShape() { - String name = shapeModel.getShapeName().toLowerCase(Locale.ENGLISH) + ".java"; - String autoConstructVariant = "./nonautoconstructcontainers/" + name; - if (getClass().getResource(autoConstructVariant) != null) { - return autoConstructVariant; - } - return name; - } - - private static void setUp() throws IOException { - File serviceModelFile = new File(AwsModelSpecWithoutAutoConstructContainersTest.class.getResource("service-2.json").getFile()); - File customizationConfigFile = new File(AwsModelSpecWithoutAutoConstructContainersTest.class - .getResource("customization.config") - .getFile()); - ServiceModel serviceModel = ModelLoaderUtils.loadModel(ServiceModel.class, serviceModelFile); - CustomizationConfig autoConstructListConfig = ModelLoaderUtils.loadModel(CustomizationConfig.class, customizationConfigFile); - autoConstructListConfig.setUseAutoConstructList(false); - autoConstructListConfig.setUseAutoConstructMap(false); - - intermediateModel = new IntermediateModelBuilder( - C2jModels.builder() - .serviceModel(serviceModel) - .customizationConfig(autoConstructListConfig) - .build()) - .build(); - - } -} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/ModelCopierSpecWithoutAutoConstructContainersTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/ModelCopierSpecWithoutAutoConstructContainersTest.java deleted file mode 100644 index 27f1cbb6eabe..000000000000 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/ModelCopierSpecWithoutAutoConstructContainersTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.codegen.poet.model; - -import static java.util.stream.Collectors.toList; -import static org.hamcrest.MatcherAssert.assertThat; -import static software.amazon.awssdk.codegen.poet.PoetMatchers.generatesTo; -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.Collection; -import java.util.Locale; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import software.amazon.awssdk.codegen.C2jModels; -import software.amazon.awssdk.codegen.IntermediateModelBuilder; -import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; -import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; -import software.amazon.awssdk.codegen.model.service.ServiceModel; -import software.amazon.awssdk.codegen.poet.ClassSpec; -import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; - -/** - * Similar to {@link ModelCopierSpecTest} but tests correct generation when auto construct containers are disabled. - */ -@RunWith(Parameterized.class) -public class ModelCopierSpecWithoutAutoConstructContainersTest { - private static File serviceModelFile; - private static IntermediateModel intermediateModel; - private final ClassSpec spec; - private final String specName; - - private static void setUp() throws URISyntaxException, IOException { - serviceModelFile = new File(AwsModelSpecTest.class - .getResource("service-2.json") - .getFile()); - - File customizationConfigFile = new File(AwsModelSpecTest.class - .getResource("customization.config") - .getFile()); - - CustomizationConfig customizationConfig = ModelLoaderUtils.loadModel(CustomizationConfig.class, customizationConfigFile); - customizationConfig.setUseAutoConstructList(false); - customizationConfig.setUseAutoConstructMap(false); - - intermediateModel = new IntermediateModelBuilder( - C2jModels.builder() - .serviceModel(ModelLoaderUtils.loadModel(ServiceModel.class, serviceModelFile)) - .customizationConfig(customizationConfig) - .build()) - .build(); - } - - @Parameterized.Parameters(name = "{1}") - public static Collection data() { - invokeSafely(ModelCopierSpecWithoutAutoConstructContainersTest::setUp); - return new ServiceModelCopiers(intermediateModel).copierSpecs().stream() - .map(spec -> new Object[] { spec, spec.className().simpleName().toLowerCase(Locale.ENGLISH) }) - .collect(toList()); - } - - public ModelCopierSpecWithoutAutoConstructContainersTest(ClassSpec spec, String specName) { - this.spec = spec; - this.specName = specName; - } - - @Test - public void basicGeneration() { - assertThat(spec, generatesTo(expectedFile())); - } - - private String expectedFile() { - String name = specName + ".java"; - String autoConstructVariant = "./nonautoconstructcontainers/" + name; - if (getClass().getResource(autoConstructVariant) != null) { - return autoConstructVariant; - } 
- return name; - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/happy-case-customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/happy-case-customization.config index 202834524210..8c8a414fcc24 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/happy-case-customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/happy-case-customization.config @@ -1,5 +1,6 @@ { "useLegacyEventGenerationScheme": { "EventStream": ["EventOne"] - } + }, + "underscoresInNameBehavior": "ALLOW" } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/service-2.json index 4b862fd96839..9d013117fcad 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/emitters/customizations/processors/uselegacyeventgenerationscheme/service-2.json @@ -404,16 +404,16 @@ "EventOne": { "shape": "EventOne" }, - "event-two": { + "event_two": { "shape": "EventTwo" }, "secondEventOne": { "shape": "EventOne" }, - "second-event-two": { + "second_event_two": { "shape": "EventTwo" }, - "third-event-two-customizedVisitMethod": { + "third_event_two_customizedVisitMethod": { "shape": "EventTwo" } }, diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json index 4670a30fdfa3..d2a5578398b7 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json @@ -13,6 +13,14 @@ "xmlNamespace": "https://json-service.amazonaws.com/doc/2010-05-08/" }, "operations": { + "OperationWithChecksumRequired": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "httpChecksumRequired": true + }, "APostOperation": { "name": "APostOperation", "http": { @@ -404,14 +412,11 @@ "EventOne": { "shape": "EventOne" }, - "event-two": { + "EventTheSecond": { "shape": "EventTwo" }, "secondEventOne": { "shape": "EventOne" - }, - "second-event-two": { - "shape": "EventTwo" } }, "eventstream": true diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json index f1771745a533..2cc6c02c3825 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json @@ -13,6 +13,14 @@ "xmlNamespace": "https://query-service.amazonaws.com/doc/2010-05-08/" }, "operations": { + "OperationWithChecksumRequired": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "httpChecksumRequired": true + }, "APostOperation": { "name": 
"APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/customization.config new file mode 100644 index 000000000000..dda643384baa --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/customization.config @@ -0,0 +1,5 @@ +{ + "authPolicyActions" : { + "skip" : true + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json new file mode 100644 index 000000000000..bbf395164f8a --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json @@ -0,0 +1,193 @@ +{ + "version": "2.0", + "metadata": { + "apiVersion": "2010-05-08", + "endpointPrefix": "xml-service", + "globalEndpoint": "xml-service.amazonaws.com", + "protocol": "rest-xml", + "serviceAbbreviation": "xml Service", + "serviceFullName": "Some Service That Uses xml Protocol", + "serviceId":"Xml Service", + "signatureVersion": "v4", + "uid": "xml-service-2010-05-08", + "xmlNamespace": "https://xml-service.amazonaws.com/doc/2010-05-08/" + }, + "operations": { + "OperationWithChecksumRequired": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "httpChecksumRequired": true + }, + "APostOperation": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "endpoint": { + "hostPrefix": "foo-" + }, + "input": { + "shape": "APostOperationRequest" + }, + "errors": [ + { + "shape": "InvalidInputException" + } + ], + "documentation": "

Performs a post operation to the xml service and has no output

" + }, + "APostOperationWithOutput": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "APostOperationWithOutputRequest" + }, + "output": { + "shape": "APostOperationWithOutputResponse", + "resultWrapper": "APostOperationWithOutputResult" + }, + "errors": [ + { + "shape": "InvalidInputException" + } + ], + "documentation": "

Performs a post operation to the xml service and has modelled output

" + }, + "StreamingInputOperation": { + "name": "StreamingInputOperation", + "http": { + "method": "POST", + "requestUri": "/2016-03-11/streamingInputOperation" + }, + "input": { + "shape": "StructureWithStreamingMember" + }, + "documentation": "Some operation with a streaming input" + }, + "StreamingOutputOperation": { + "name": "StreamingOutputOperation", + "http": { + "method": "POST", + "requestUri": "/2016-03-11/streamingOutputOperation" + }, + "output": { + "shape": "StructureWithStreamingMember" + }, + "documentation": "Some operation with a streaming output" + } + }, + "shapes": { + "APostOperationRequest": { + "type": "structure", + "required": [ + "SomeNestedMember" + ], + "members": { + "SomeNestedMember": { + "shape": "nestedMember", + "documentation": "

a member that has nested members

" + }, + "OptionalMember": { + "shape": "dateType", + "documentation": "

An optional member

" + } + } + }, + "APostOperationWithOutputRequest": { + "type": "structure", + "required": [ + "SomeNestedMember" + ], + "members": { + "SomeNestedMember": { + "shape": "nestedMember", + "documentation": "

a member that has nested members

" + }, + "OptionalMember": { + "shape": "dateType", + "documentation": "

An optional member

" + } + } + }, + "APostOperationWithOutputResponse": { + "type": "structure", + "required": [ + "NestedMember" + ], + "members": { + "NestedMember": { + "shape": "nestedMember", + "documentation": "

A structure containing nested members

" + } + }, + "documentation": "

Contains the response to a successful APostOperationWithOutput request.

" + }, + "InvalidInputException": { + "type": "structure", + "members": { + "message": { + "shape": "invalidInputMessage" + } + }, + "documentation": "
The request was rejected because an invalid or out-of-range value was supplied for an input parameter.
", + "error": { + "code": "InvalidInput", + "httpStatusCode": 400, + "senderFault": true + }, + "exception": true + }, + "nestedMember": { + "type": "structure", + "required": [ + "SubMember", + "CreateDate" + ], + "members": { + "SubMember": { + "shape": "subMember", + "documentation": "
A sub-member
" + }, + "CreateDate": { + "shape": "dateType", + "documentation": "
The date and time, in ISO 8601 date-time format, when the member was created.
" + } + }, + "documentation": "
A shape with nested sub-members" + }, + "subMember": { + "type": "string", + "max": 63, + "min": 3, + "pattern": "^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$" + }, + "dateType": { + "type": "timestamp" + }, + "invalidInputMessage": { + "type": "string" + }, + "StreamType": { + "type": "blob", + "streaming": true + }, + "StructureWithStreamingMember": { + "type": "structure", + "members": { + "StreamingMember": { + "shape": "StreamType", + "documentation": "This be a stream" + } + }, + "payload": "StreamingMember" + } + }, + "documentation": "A service that is implemented using the xml protocol" +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java index f8b0a8eac9c0..721b3fa897e4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java @@ -38,6 +38,8 @@ import software.amazon.awssdk.core.client.handler.AttachHttpMetadataResponseHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; @@ -75,6 +77,8 @@ import software.amazon.awssdk.services.json.model.InvalidInputException; import software.amazon.awssdk.services.json.model.JsonException; import software.amazon.awssdk.services.json.model.JsonRequest; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -95,6 +99,7 @@ import software.amazon.awssdk.services.json.transform.GetWithoutRequiredMembersRequestMarshaller; import software.amazon.awssdk.services.json.transform.InputEventMarshaller; import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.StreamingInputOperationRequestMarshaller; @@ -300,9 +305,8 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventOne::builder) - .putSdkPojoSupplier("event-two", EventTwo::builder) + .putSdkPojoSupplier("EventTheSecond", EventTwo::builder) .putSdkPojoSupplier("secondEventOne", EventOne::builder) - 
.putSdkPojoSupplier("second-event-two", EventTwo::builder) .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, @@ -327,10 +331,10 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest new ClientExecutionParams() .withOperationName("EventStreamOperation") .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(software.amazon.awssdk.core.async.AsyncRequestBody.fromPublisher(adapted)) - .withFullDuplex(true).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector) - .withInput(eventStreamOperationRequest), restAsyncResponseTransformer); + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withFullDuplex(true) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), + restAsyncResponseTransformer); AwsRequestOverrideConfiguration requestOverrideConfig = eventStreamOperationRequest.overrideConfiguration().orElse( null); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { @@ -405,9 +409,9 @@ public CompletableFuture eventStreamO .execute(new ClientExecutionParams() .withOperationName("EventStreamOperationWithOnlyInput") .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) - .withAsyncRequestBody(software.amazon.awssdk.core.async.AsyncRequestBody.fromPublisher(adapted)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationWithOnlyInputRequest)); + .withAsyncRequestBody(AsyncRequestBody.fromPublisher(adapted)).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyInputRequest)); AwsRequestOverrideConfiguration requestOverrideConfig = eventStreamOperationWithOnlyInputRequest .overrideConfiguration().orElse(null); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { @@ -464,9 +468,8 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventOne::builder) - .putSdkPojoSupplier("event-two", EventTwo::builder) + .putSdkPojoSupplier("EventTheSecond", EventTwo::builder) .putSdkPojoSupplier("secondEventOne", EventOne::builder) - .putSdkPojoSupplier("second-event-two", EventTwo::builder) .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, @@ -573,6 +576,67 @@ public CompletableFuture getWithoutRequiredMe } } + /** + * Invokes the OperationWithChecksumRequired operation asynchronously. + * + * @param operationWithChecksumRequiredRequest + * @return A Java Future containing the result of the OperationWithChecksumRequired operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithChecksumRequiredResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = operationWithChecksumRequiredRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); + return executeFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some paginated operation with result_key in paginators.json file * @@ -653,7 +717,7 @@ public CompletableFuture paginatedOpera * The following are few ways to use the response class: *
* 1) Using the subscribe helper method - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyPublisher publisher = client.paginatedOperationWithResultKeyPaginator(request);
@@ -663,19 +727,19 @@ public CompletableFuture paginatedOpera
      *
* * 2) Using a custom subscriber - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyPublisher publisher = client.paginatedOperationWithResultKeyPaginator(request);
      * publisher.subscribe(new Subscriber() {
-     *
+     * 
      * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
-     *
-     *
+     * 
+     * 
      * public void onNext(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse response) { //... };
      * });}
      *
- * + * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *
* Please notice that the configuration of MaxResults won't limit the number of results you get with the @@ -788,7 +852,7 @@ public CompletableFuture paginatedOp * The following are few ways to use the response class: *
* 1) Using the subscribe helper method - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyPublisher publisher = client.paginatedOperationWithoutResultKeyPaginator(request);
@@ -798,19 +862,19 @@ public CompletableFuture paginatedOp
      *
* * 2) Using a custom subscriber - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyPublisher publisher = client.paginatedOperationWithoutResultKeyPaginator(request);
      * publisher.subscribe(new Subscriber() {
-     *
+     * 
      * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
-     *
-     *
+     * 
+     * 
      * public void onNext(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyResponse response) { //... };
      * });}
      *
- * + * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *
* Please notice that the configuration of MaxResults won't limit the number of results you get with the @@ -1129,3 +1193,4 @@ private HttpResponseHandler createErrorResponseHandler(Base return protocolFactory.createErrorResponseHandler(operationMetadata); } } + diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java index e97f69cd5896..43455c17f37e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java @@ -22,6 +22,8 @@ import software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersResponse; import software.amazon.awssdk.services.json.model.InputEventStream; import software.amazon.awssdk.services.json.model.InputEventStreamTwo; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -145,7 +147,7 @@ default CompletableFuture aPostOperation(ConsumerAWS API Documentation */ default CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { throw new UnsupportedOperationException(); } @@ -180,9 +182,9 @@ default CompletableFuture aPostOperationWithOu * target="_top">AWS API Documentation */ default CompletableFuture aPostOperationWithOutput( - Consumer aPostOperationWithOutputRequest) { + Consumer aPostOperationWithOutputRequest) { return aPostOperationWithOutput(APostOperationWithOutputRequest.builder().applyMutation(aPostOperationWithOutputRequest) - .build()); + .build()); } /** @@ -205,7 +207,7 @@ default CompletableFuture aPostOperationWithOu * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { + Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { throw new UnsupportedOperationException(); } @@ -235,10 +237,10 @@ default CompletableFuture eventStreamOperation(EventStreamOperationRequest * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperation( - Consumer eventStreamOperationRequest, Publisher requestStream, - EventStreamOperationResponseHandler asyncResponseHandler) { + Consumer eventStreamOperationRequest, Publisher requestStream, + EventStreamOperationResponseHandler asyncResponseHandler) { return eventStreamOperation(EventStreamOperationRequest.builder().applyMutation(eventStreamOperationRequest).build(), - requestStream, asyncResponseHandler); + requestStream, asyncResponseHandler); } /** @@ -262,8 +264,8 @@ default CompletableFuture eventStreamOperation( * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperationWithOnlyInput( - EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, - Publisher requestStream) { + 
EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, + Publisher requestStream) { throw new UnsupportedOperationException(); } @@ -294,11 +296,11 @@ default CompletableFuture eventStream * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperationWithOnlyInput( - Consumer eventStreamOperationWithOnlyInputRequest, - Publisher requestStream) { + Consumer eventStreamOperationWithOnlyInputRequest, + Publisher requestStream) { return eventStreamOperationWithOnlyInput( - EventStreamOperationWithOnlyInputRequest.builder().applyMutation(eventStreamOperationWithOnlyInputRequest) - .build(), requestStream); + EventStreamOperationWithOnlyInputRequest.builder().applyMutation(eventStreamOperationWithOnlyInputRequest) + .build(), requestStream); } /** @@ -322,8 +324,8 @@ default CompletableFuture eventStream * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { throw new UnsupportedOperationException(); } @@ -354,11 +356,11 @@ default CompletableFuture eventStreamOperationWithOnlyOutput( * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperationWithOnlyOutput( - Consumer eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + Consumer eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { return eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest.builder().applyMutation(eventStreamOperationWithOnlyOutputRequest) - .build(), asyncResponseHandler); + EventStreamOperationWithOnlyOutputRequest.builder().applyMutation(eventStreamOperationWithOnlyOutputRequest) + .build(), asyncResponseHandler); } /** @@ -385,7 +387,7 @@ default CompletableFuture eventStreamOperationWithOnlyOutput( * target="_top">AWS API Documentation */ default CompletableFuture getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { throw new UnsupportedOperationException(); } @@ -420,9 +422,66 @@ default CompletableFuture getWithoutRequiredM * target="_top">AWS API Documentation */ default CompletableFuture getWithoutRequiredMembers( - Consumer getWithoutRequiredMembersRequest) { + Consumer getWithoutRequiredMembersRequest) { return getWithoutRequiredMembers(GetWithoutRequiredMembersRequest.builder() - .applyMutation(getWithoutRequiredMembersRequest).build()); + .applyMutation(getWithoutRequiredMembersRequest).build()); + } + + /** + * Invokes the OperationWithChecksumRequired operation asynchronously. + * + * @param operationWithChecksumRequiredRequest + * @return A Java Future containing the result of the OperationWithChecksumRequired operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + default CompletableFuture operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithChecksumRequired operation asynchronously.
+ *
+ * This is a convenience which creates an instance of the {@link OperationWithChecksumRequiredRequest.Builder}
+ * avoiding the need to create one manually via {@link OperationWithChecksumRequiredRequest#builder()}
+ *
+ * + * @param operationWithChecksumRequiredRequest + * A {@link Consumer} that will call methods on {@link OperationWithChecksumRequiredRequest.Builder} to + * create a request. + * @return A Java Future containing the result of the OperationWithChecksumRequired operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + default CompletableFuture operationWithChecksumRequired( + Consumer operationWithChecksumRequiredRequest) { + return operationWithChecksumRequired(OperationWithChecksumRequiredRequest.builder() + .applyMutation(operationWithChecksumRequiredRequest).build()); } /** @@ -446,7 +505,7 @@ default CompletableFuture getWithoutRequiredM * target="_top">AWS API Documentation */ default CompletableFuture paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { throw new UnsupportedOperationException(); } @@ -477,9 +536,9 @@ default CompletableFuture paginatedOper * target="_top">AWS API Documentation */ default CompletableFuture paginatedOperationWithResultKey( - Consumer paginatedOperationWithResultKeyRequest) { + Consumer paginatedOperationWithResultKeyRequest) { return paginatedOperationWithResultKey(PaginatedOperationWithResultKeyRequest.builder() - .applyMutation(paginatedOperationWithResultKeyRequest).build()); + .applyMutation(paginatedOperationWithResultKeyRequest).build()); } /** @@ -653,7 +712,7 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey * target="_top">AWS API Documentation */ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKeyPaginator( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { throw new UnsupportedOperationException(); } @@ -736,9 +795,9 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey * target="_top">AWS API Documentation */ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKeyPaginator( - Consumer paginatedOperationWithResultKeyRequest) { + Consumer paginatedOperationWithResultKeyRequest) { return paginatedOperationWithResultKeyPaginator(PaginatedOperationWithResultKeyRequest.builder() - .applyMutation(paginatedOperationWithResultKeyRequest).build()); + .applyMutation(paginatedOperationWithResultKeyRequest).build()); } /** @@ -762,7 +821,7 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey * target="_top">AWS API Documentation */ default CompletableFuture paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { throw new UnsupportedOperationException(); } @@ -793,9 +852,9 @@ default CompletableFuture paginatedO * target="_top">AWS API Documentation */ default CompletableFuture paginatedOperationWithoutResultKey( - Consumer paginatedOperationWithoutResultKeyRequest) { + Consumer paginatedOperationWithoutResultKeyRequest) { return paginatedOperationWithoutResultKey(PaginatedOperationWithoutResultKeyRequest.builder() - .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); + .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); } /** @@ -871,7 +930,7 @@ default CompletableFuture paginatedO * target="_top">AWS API Documentation */ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutResultKeyPaginator( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { + PaginatedOperationWithoutResultKeyRequest 
paginatedOperationWithoutResultKeyRequest) { throw new UnsupportedOperationException(); } @@ -954,9 +1013,9 @@ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutRes * target="_top">AWS API Documentation */ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutResultKeyPaginator( - Consumer paginatedOperationWithoutResultKeyRequest) { + Consumer paginatedOperationWithoutResultKeyRequest) { return paginatedOperationWithoutResultKeyPaginator(PaginatedOperationWithoutResultKeyRequest.builder() - .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); + .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); } /** @@ -984,7 +1043,7 @@ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutRes * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { throw new UnsupportedOperationException(); } @@ -1019,9 +1078,9 @@ default CompletableFuture streamingInputOperati * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOperation( - Consumer streamingInputOperationRequest, AsyncRequestBody requestBody) { + Consumer streamingInputOperationRequest, AsyncRequestBody requestBody) { return streamingInputOperation(StreamingInputOperationRequest.builder().applyMutation(streamingInputOperationRequest) - .build(), requestBody); + .build(), requestBody); } /** @@ -1049,7 +1108,7 @@ default CompletableFuture streamingInputOperati * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, Path sourcePath) { + StreamingInputOperationRequest streamingInputOperationRequest, Path sourcePath) { return streamingInputOperation(streamingInputOperationRequest, AsyncRequestBody.fromFile(sourcePath)); } @@ -1084,9 +1143,9 @@ default CompletableFuture streamingInputOperati * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOperation( - Consumer streamingInputOperationRequest, Path sourcePath) { + Consumer streamingInputOperationRequest, Path sourcePath) { return streamingInputOperation(StreamingInputOperationRequest.builder().applyMutation(streamingInputOperationRequest) - .build(), sourcePath); + .build(), sourcePath); } /** @@ -1119,8 +1178,8 @@ default CompletableFuture streamingInputOperati * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { throw new UnsupportedOperationException(); } @@ -1160,12 +1219,12 @@ default CompletableFuture streamingInputOutputOperation( * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOutputOperation( - Consumer streamingInputOutputOperationRequest, - AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + Consumer streamingInputOutputOperationRequest, + AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { return streamingInputOutputOperation( - 
StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), - requestBody, asyncResponseTransformer); + StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), + requestBody, asyncResponseTransformer); } /** @@ -1197,9 +1256,9 @@ default CompletableFuture streamingInputOutputOperation( * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, Path sourcePath, Path destinationPath) { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, Path sourcePath, Path destinationPath) { return streamingInputOutputOperation(streamingInputOutputOperationRequest, AsyncRequestBody.fromFile(sourcePath), - AsyncResponseTransformer.toFile(destinationPath)); + AsyncResponseTransformer.toFile(destinationPath)); } /** @@ -1237,11 +1296,11 @@ default CompletableFuture streamingInputO * target="_top">AWS API Documentation */ default CompletableFuture streamingInputOutputOperation( - Consumer streamingInputOutputOperationRequest, Path sourcePath, - Path destinationPath) { + Consumer streamingInputOutputOperationRequest, Path sourcePath, + Path destinationPath) { return streamingInputOutputOperation( - StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), - sourcePath, destinationPath); + StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), + sourcePath, destinationPath); } /** @@ -1269,8 +1328,8 @@ default CompletableFuture streamingInputO * target="_top">AWS API Documentation */ default CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { throw new UnsupportedOperationException(); } @@ -1305,10 +1364,10 @@ default CompletableFuture streamingOutputOperation( * target="_top">AWS API Documentation */ default CompletableFuture streamingOutputOperation( - Consumer streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + Consumer streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { return streamingOutputOperation(StreamingOutputOperationRequest.builder().applyMutation(streamingOutputOperationRequest) - .build(), asyncResponseTransformer); + .build(), asyncResponseTransformer); } /** @@ -1335,7 +1394,7 @@ default CompletableFuture streamingOutputOperation( * target="_top">AWS API Documentation */ default CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, Path destinationPath) { + StreamingOutputOperationRequest streamingOutputOperationRequest, Path destinationPath) { return streamingOutputOperation(streamingOutputOperationRequest, AsyncResponseTransformer.toFile(destinationPath)); } @@ -1369,9 +1428,9 @@ default CompletableFuture streamingOutputOpera * target="_top">AWS API Documentation */ default CompletableFuture streamingOutputOperation( - Consumer streamingOutputOperationRequest, Path destinationPath) { + Consumer streamingOutputOperationRequest, Path destinationPath) { return streamingOutputOperation(StreamingOutputOperationRequest.builder().applyMutation(streamingOutputOperationRequest) - .build(), 
destinationPath); + .build(), destinationPath); } /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index 14c934f5b859..b1c99a29788f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -17,6 +17,8 @@ import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -40,6 +42,8 @@ import software.amazon.awssdk.services.json.model.InvalidInputException; import software.amazon.awssdk.services.json.model.JsonException; import software.amazon.awssdk.services.json.model.JsonRequest; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -55,6 +59,7 @@ import software.amazon.awssdk.services.json.transform.APostOperationRequestMarshaller; import software.amazon.awssdk.services.json.transform.APostOperationWithOutputRequestMarshaller; import software.amazon.awssdk.services.json.transform.GetWithoutRequiredMembersRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.StreamingInputOperationRequestMarshaller; @@ -112,32 +117,32 @@ public final String serviceName() { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, JsonException { + AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - APostOperationResponse::builder); + APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).hostPrefixExpression(resolvedHostExpression) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -165,30 +170,30 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -216,30 +221,81 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); + operationMetadata, GetWithoutRequiredMembersResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(getWithoutRequiredMembersRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(getWithoutRequiredMembersRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + /** + * Invokes the OperationWithChecksumRequired operation. 
+ * + * @param operationWithChecksumRequiredRequest + * @return Result of the OperationWithChecksumRequired operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + @Override + public OperationWithChecksumRequiredResponse operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, JsonException { + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithChecksumRequiredResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -263,30 +319,30 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( */ @Override public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + 
paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(paginatedOperationWithResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(paginatedOperationWithResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -365,8 +421,8 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( */ @Override public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { return new PaginatedOperationWithResultKeyIterable(this, applyPaginatorUserAgent(paginatedOperationWithResultKeyRequest)); } @@ -388,30 +444,30 @@ public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPa */ @Override public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(paginatedOperationWithoutResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(paginatedOperationWithoutResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -490,10 +546,10 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul */ @Override public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResultKeyPaginator( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { return new PaginatedOperationWithoutResultKeyIterable(this, - applyPaginatorUserAgent(paginatedOperationWithoutResultKeyRequest)); + applyPaginatorUserAgent(paginatedOperationWithoutResultKeyRequest)); } /** @@ -525,35 +581,35 @@ public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResul */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -595,41 +651,41 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest, - Aws4UnsignedPayloadSigner.create()); + Aws4UnsignedPayloadSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withInput(streamingInputOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(streamingInputOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -660,37 +716,37 @@ public ReturnT streamingInputOutputOperation( */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -705,19 +761,19 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { + JsonOperationMetadata operationMetadata) { return protocolFactory.createErrorResponseHandler(operationMetadata); } private > T init(T builder) { return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + .clientConfiguration(clientConfiguration) + .defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.REST_JSON) + .protocolVersion("1.1") + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); } @Override @@ -727,10 +783,10 @@ public void close() { private T applyPaginatorUserAgent(T request) { Consumer userAgentApplier = b -> b.addApiName(ApiName.builder() - .version(VersionInfo.SDK_VERSION).name("PAGINATED").build()); + .version(VersionInfo.SDK_VERSION).name("PAGINATED").build()); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(userAgentApplier).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(userAgentApplier).build())); + .map(c -> c.toBuilder().applyMutation(userAgentApplier).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(userAgentApplier).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -740,8 +796,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - 
.map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java index 2685e7061b1f..77c4aa00dc3d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java @@ -19,6 +19,8 @@ import software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersResponse; import software.amazon.awssdk.services.json.model.InvalidInputException; import software.amazon.awssdk.services.json.model.JsonException; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -109,7 +111,7 @@ default APostOperationResponse aPostOperation(APostOperationRequest aPostOperati * API Documentation */ default APostOperationResponse aPostOperation(Consumer aPostOperationRequest) - throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { + throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { return aPostOperation(APostOperationRequest.builder().applyMutation(aPostOperationRequest).build()); } @@ -134,8 +136,8 @@ default APostOperationResponse aPostOperation(ConsumerAWS API Documentation */ default APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -167,8 +169,8 @@ default APostOperationWithOutputResponse aPostOperationWithOutput( * target="_top">AWS API Documentation */ default APostOperationWithOutputResponse aPostOperationWithOutput( - Consumer aPostOperationWithOutputRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, JsonException { + Consumer aPostOperationWithOutputRequest) throws InvalidInputException, + AwsServiceException, SdkClientException, JsonException { return aPostOperationWithOutput(APostOperationWithOutputRequest.builder().applyMutation(aPostOperationWithOutputRequest) .build()); } @@ -194,8 +196,8 @@ default APostOperationWithOutputResponse aPostOperationWithOutput( * target="_top">AWS API Documentation */ default GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + 
GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -227,12 +229,63 @@ default GetWithoutRequiredMembersResponse getWithoutRequiredMembers( * target="_top">AWS API Documentation */ default GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - Consumer getWithoutRequiredMembersRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, JsonException { + Consumer getWithoutRequiredMembersRequest) throws InvalidInputException, + AwsServiceException, SdkClientException, JsonException { return getWithoutRequiredMembers(GetWithoutRequiredMembersRequest.builder() .applyMutation(getWithoutRequiredMembersRequest).build()); } + /** + * Invokes the OperationWithChecksumRequired operation. + * + * @param operationWithChecksumRequiredRequest + * @return Result of the OperationWithChecksumRequired operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + default OperationWithChecksumRequiredResponse operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, JsonException { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithChecksumRequired operation.
+     * <p>
+     * This is a convenience which creates an instance of the {@link OperationWithChecksumRequiredRequest.Builder}
+     * avoiding the need to create one manually via {@link OperationWithChecksumRequiredRequest#builder()}
+     * </p>
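+     * <p>
+     * Illustrative usage sketch (editorial addition, not generated documentation): assuming an already-built
+     * {@code JsonClient} named {@code client}, the consumer overload could be invoked as
+     * <pre>{@code
+     * OperationWithChecksumRequiredResponse response = client.operationWithChecksumRequired(request -> {
+     *     // populate request fields here as needed before the SDK builds and sends the request
+     * });
+     * }</pre>
+     * </p>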
+ * + * @param operationWithChecksumRequiredRequest + * A {@link Consumer} that will call methods on {@link OperationWithChecksumRequiredRequest.Builder} to + * create a request. + * @return Result of the OperationWithChecksumRequired operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + default OperationWithChecksumRequiredResponse operationWithChecksumRequired( + Consumer operationWithChecksumRequiredRequest) + throws AwsServiceException, SdkClientException, JsonException { + return operationWithChecksumRequired(OperationWithChecksumRequiredRequest.builder() + .applyMutation(operationWithChecksumRequiredRequest).build()); + } + /** * Some paginated operation with result_key in paginators.json file * @@ -271,8 +324,8 @@ default PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( * target="_top">AWS API Documentation */ default PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -299,8 +352,8 @@ default PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( * target="_top">AWS API Documentation */ default PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - Consumer paginatedOperationWithResultKeyRequest) - throws AwsServiceException, SdkClientException, JsonException { + Consumer paginatedOperationWithResultKeyRequest) + throws AwsServiceException, SdkClientException, JsonException { return paginatedOperationWithResultKey(PaginatedOperationWithResultKeyRequest.builder() .applyMutation(paginatedOperationWithResultKeyRequest).build()); } @@ -453,8 +506,8 @@ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyP * target="_top">AWS API Documentation */ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -536,8 +589,8 @@ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyP * target="_top">AWS API Documentation */ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator( - Consumer paginatedOperationWithResultKeyRequest) - throws AwsServiceException, SdkClientException, JsonException { + Consumer paginatedOperationWithResultKeyRequest) + throws AwsServiceException, SdkClientException, JsonException { return paginatedOperationWithResultKeyPaginator(PaginatedOperationWithResultKeyRequest.builder() 
.applyMutation(paginatedOperationWithResultKeyRequest).build()); } @@ -559,8 +612,8 @@ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyP * target="_top">AWS API Documentation */ default PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -587,8 +640,8 @@ default PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResu * target="_top">AWS API Documentation */ default PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - Consumer paginatedOperationWithoutResultKeyRequest) - throws AwsServiceException, SdkClientException, JsonException { + Consumer paginatedOperationWithoutResultKeyRequest) + throws AwsServiceException, SdkClientException, JsonException { return paginatedOperationWithoutResultKey(PaginatedOperationWithoutResultKeyRequest.builder() .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); } @@ -665,8 +718,8 @@ default PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResu * target="_top">AWS API Documentation */ default PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResultKeyPaginator( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -748,8 +801,8 @@ default PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResu * target="_top">AWS API Documentation */ default PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResultKeyPaginator( - Consumer paginatedOperationWithoutResultKeyRequest) - throws AwsServiceException, SdkClientException, JsonException { + Consumer paginatedOperationWithoutResultKeyRequest) + throws AwsServiceException, SdkClientException, JsonException { return paginatedOperationWithoutResultKeyPaginator(PaginatedOperationWithoutResultKeyRequest.builder() .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); } @@ -782,8 +835,8 @@ default PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResu * target="_top">AWS API Documentation */ default StreamingInputOperationResponse streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, RequestBody requestBody) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOperationRequest streamingInputOperationRequest, RequestBody requestBody) throws AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -821,8 +874,8 @@ default StreamingInputOperationResponse streamingInputOperation( * target="_top">AWS API Documentation */ default StreamingInputOperationResponse streamingInputOperation( - Consumer streamingInputOperationRequest, RequestBody requestBody) - throws AwsServiceException, SdkClientException, JsonException { + Consumer streamingInputOperationRequest, RequestBody requestBody) + throws AwsServiceException, SdkClientException, JsonException { return 
streamingInputOperation(StreamingInputOperationRequest.builder().applyMutation(streamingInputOperationRequest) .build(), requestBody); } @@ -850,8 +903,8 @@ default StreamingInputOperationResponse streamingInputOperation( * target="_top">AWS API Documentation */ default StreamingInputOperationResponse streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, Path sourcePath) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOperationRequest streamingInputOperationRequest, Path sourcePath) throws AwsServiceException, + SdkClientException, JsonException { return streamingInputOperation(streamingInputOperationRequest, RequestBody.fromFile(sourcePath)); } @@ -884,8 +937,8 @@ default StreamingInputOperationResponse streamingInputOperation( * target="_top">AWS API Documentation */ default StreamingInputOperationResponse streamingInputOperation( - Consumer streamingInputOperationRequest, Path sourcePath) - throws AwsServiceException, SdkClientException, JsonException { + Consumer streamingInputOperationRequest, Path sourcePath) + throws AwsServiceException, SdkClientException, JsonException { return streamingInputOperation(StreamingInputOperationRequest.builder().applyMutation(streamingInputOperationRequest) .build(), sourcePath); } @@ -907,11 +960,11 @@ default StreamingInputOperationResponse streamingInputOperation( * The service documentation for the request content is as follows 'This be a stream' * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationResponse and an InputStream to the response content are provided as parameters - * to the callback. The callback may return a transformed type which will be the return value of this method. - * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this - * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The - * service documentation for the response content is as follows 'This be a stream'. + * StreamingInputOutputOperationResponse and an InputStream to the response content are provided as + * parameters to the callback. The callback may return a transformed type which will be the return value of + * this method. See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing + * this interface and for links to pre-canned implementations for common scenarios like downloading to a + * file. The service documentation for the response content is as follows 'This be a stream'. * @return The transformed result of the ResponseTransformer. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). 
Can be used for @@ -925,9 +978,9 @@ default StreamingInputOperationResponse streamingInputOperation( * target="_top">AWS API Documentation */ default ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { throw new UnsupportedOperationException(); } @@ -954,11 +1007,11 @@ default ReturnT streamingInputOutputOperation( * The service documentation for the request content is as follows 'This be a stream' * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationResponse and an InputStream to the response content are provided as parameters - * to the callback. The callback may return a transformed type which will be the return value of this method. - * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this - * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The - * service documentation for the response content is as follows 'This be a stream'. + * StreamingInputOutputOperationResponse and an InputStream to the response content are provided as + * parameters to the callback. The callback may return a transformed type which will be the return value of + * this method. See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing + * this interface and for links to pre-canned implementations for common scenarios like downloading to a + * file. The service documentation for the response content is as follows 'This be a stream'. * @return The transformed result of the ResponseTransformer. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). 
Can be used for @@ -972,12 +1025,12 @@ default ReturnT streamingInputOutputOperation( * target="_top">AWS API Documentation */ default ReturnT streamingInputOutputOperation( - Consumer streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + Consumer streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { return streamingInputOutputOperation( - StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), - requestBody, responseTransformer); + StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), + requestBody, responseTransformer); } /** @@ -1008,8 +1061,8 @@ default ReturnT streamingInputOutputOperation( * target="_top">AWS API Documentation */ default StreamingInputOutputOperationResponse streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, Path sourcePath, Path destinationPath) - throws AwsServiceException, SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, Path sourcePath, Path destinationPath) + throws AwsServiceException, SdkClientException, JsonException { return streamingInputOutputOperation(streamingInputOutputOperationRequest, RequestBody.fromFile(sourcePath), ResponseTransformer.toFile(destinationPath)); } @@ -1048,11 +1101,11 @@ default StreamingInputOutputOperationResponse streamingInputOutputOperation( * target="_top">AWS API Documentation */ default StreamingInputOutputOperationResponse streamingInputOutputOperation( - Consumer streamingInputOutputOperationRequest, Path sourcePath, - Path destinationPath) throws AwsServiceException, SdkClientException, JsonException { + Consumer streamingInputOutputOperationRequest, Path sourcePath, + Path destinationPath) throws AwsServiceException, SdkClientException, JsonException { return streamingInputOutputOperation( - StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), - sourcePath, destinationPath); + StreamingInputOutputOperationRequest.builder().applyMutation(streamingInputOutputOperationRequest).build(), + sourcePath, destinationPath); } /** @@ -1061,8 +1114,8 @@ default StreamingInputOutputOperationResponse streamingInputOutputOperation( * @param streamingOutputOperationRequest * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters - * to the callback. The callback may return a transformed type which will be the return value of this method. + * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters to + * the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The * service documentation for the response content is as follows 'This be a stream'. @@ -1096,8 +1149,8 @@ default ReturnT streamingOutputOperation(StreamingOutputOperationReque * request. 
* @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters - * to the callback. The callback may return a transformed type which will be the return value of this method. + * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters to + * the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The * service documentation for the response content is as follows 'This be a stream'. @@ -1114,9 +1167,9 @@ default ReturnT streamingOutputOperation(StreamingOutputOperationReque * target="_top">AWS API Documentation */ default ReturnT streamingOutputOperation( - Consumer streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + Consumer streamingOutputOperationRequest, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { return streamingOutputOperation(StreamingOutputOperationRequest.builder().applyMutation(streamingOutputOperationRequest) .build(), responseTransformer); } @@ -1143,8 +1196,8 @@ default ReturnT streamingOutputOperation( * target="_top">AWS API Documentation */ default StreamingOutputOperationResponse streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, Path destinationPath) throws AwsServiceException, - SdkClientException, JsonException { + StreamingOutputOperationRequest streamingOutputOperationRequest, Path destinationPath) throws AwsServiceException, + SdkClientException, JsonException { return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toFile(destinationPath)); } @@ -1176,8 +1229,8 @@ default StreamingOutputOperationResponse streamingOutputOperation( * target="_top">AWS API Documentation */ default StreamingOutputOperationResponse streamingOutputOperation( - Consumer streamingOutputOperationRequest, Path destinationPath) - throws AwsServiceException, SdkClientException, JsonException { + Consumer streamingOutputOperationRequest, Path destinationPath) + throws AwsServiceException, SdkClientException, JsonException { return streamingOutputOperation(StreamingOutputOperationRequest.builder().applyMutation(streamingOutputOperationRequest) .build(), destinationPath); } @@ -1205,8 +1258,8 @@ default StreamingOutputOperationResponse streamingOutputOperation( * target="_top">AWS API Documentation */ default ResponseInputStream streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, - JsonException { + StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, + JsonException { return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toInputStream()); } @@ -1239,8 +1292,8 @@ default ResponseInputStream streamingOutputOpe * target="_top">AWS API Documentation */ default ResponseInputStream streamingOutputOperation( - Consumer streamingOutputOperationRequest) throws AwsServiceException, - SdkClientException, JsonException { + Consumer 
streamingOutputOperationRequest) throws AwsServiceException, + SdkClientException, JsonException { return streamingOutputOperation(StreamingOutputOperationRequest.builder().applyMutation(streamingOutputOperationRequest) .build()); } @@ -1266,8 +1319,8 @@ default ResponseInputStream streamingOutputOpe * target="_top">AWS API Documentation */ default ResponseBytes streamingOutputOperationAsBytes( - StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, - JsonException { + StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, + JsonException { return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toBytes()); } @@ -1298,8 +1351,8 @@ default ResponseBytes streamingOutputOperation * target="_top">AWS API Documentation */ default ResponseBytes streamingOutputOperationAsBytes( - Consumer streamingOutputOperationRequest) throws AwsServiceException, - SdkClientException, JsonException { + Consumer streamingOutputOperationRequest) throws AwsServiceException, + SdkClientException, JsonException { return streamingOutputOperationAsBytes(StreamingOutputOperationRequest.builder() .applyMutation(streamingOutputOperationRequest).build()); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index 78b683efbc26..7255486fa623 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -23,6 +23,8 @@ import software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -36,6 +38,8 @@ import software.amazon.awssdk.services.query.model.APostOperationWithOutputRequest; import software.amazon.awssdk.services.query.model.APostOperationWithOutputResponse; import software.amazon.awssdk.services.query.model.InvalidInputException; +import software.amazon.awssdk.services.query.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.query.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.query.model.QueryException; import software.amazon.awssdk.services.query.model.QueryRequest; import software.amazon.awssdk.services.query.model.StreamingInputOperationRequest; @@ -44,6 +48,7 @@ import software.amazon.awssdk.services.query.model.StreamingOutputOperationResponse; import software.amazon.awssdk.services.query.transform.APostOperationRequestMarshaller; import software.amazon.awssdk.services.query.transform.APostOperationWithOutputRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; import 
software.amazon.awssdk.services.query.transform.StreamingOutputOperationRequestMarshaller; import software.amazon.awssdk.services.query.waiters.QueryAsyncWaiter; @@ -105,27 +110,27 @@ public final String serviceName() { @Override public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation") - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withMetricCollector(apiCallMetricCollector).hostPrefixExpression(resolvedHostExpression) - .withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation") + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest)); AwsRequestOverrideConfiguration requestOverrideConfig = aPostOperationRequest.overrideConfiguration().orElse(null); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { @@ -163,28 +168,28 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput") - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput") + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationWithOutputRequest)); AwsRequestOverrideConfiguration requestOverrideConfig = aPostOperationWithOutputRequest.overrideConfiguration() - .orElse(null); + .orElse(null); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -196,6 +201,64 @@ public CompletableFuture aPostOperationWithOut } } + /** + * Invokes the OperationWithChecksumRequired operation asynchronously. + * + * @param operationWithChecksumRequiredRequest + * @return A Java Future containing the result of the OperationWithChecksumRequired operation returned by the + * service.
+     *         The CompletableFuture returned by this method can be completed exceptionally with the following
+     *         exceptions.
+     *         <ul>
+     *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+     *         Can be used for catch all scenarios.</li>
+     *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+     *         credentials, etc.</li>
+     *         <li>QueryException Base class for all service exceptions. Unknown exceptions will be thrown as an
+     *         instance of this type.</li>
+     *         </ul>
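+     *         <p>
+     *         Illustrative usage sketch (editorial addition, not generated documentation): assuming an
+     *         already-built {@code QueryAsyncClient} named {@code client} and a request named {@code request},
+     *         <pre>{@code
+     *         CompletableFuture<OperationWithChecksumRequiredResponse> future =
+     *             client.operationWithChecksumRequired(request);
+     *         future.whenComplete((response, error) -> {
+     *             // inspect response on success, or error on failure
+     *         });
+     *         }</pre>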
+ * @sample QueryAsyncClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = operationWithChecksumRequiredRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some operation with a streaming input * @@ -222,11 +285,11 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); @@ -235,21 +298,21 @@ public CompletableFuture streamingInputOperatio } HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withInput(streamingInputOperationRequest)); AwsRequestOverrideConfiguration requestOverrideConfig = streamingInputOperationRequest.overrideConfiguration() - .orElse(null); + .orElse(null); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -287,42 +350,42 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation") - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withMetricCollector(apiCallMetricCollector).withInput(streamingOutputOperationRequest), - asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation") + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AwsRequestOverrideConfiguration requestOverrideConfig = streamingOutputOperationRequest.overrideConfiguration() - .orElse(null); + .orElse(null); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseTransformer.exceptionOccurred(e)); + () -> asyncResponseTransformer.exceptionOccurred(e)); } metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseTransformer.exceptionOccurred(t)); + () -> asyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -335,15 +398,15 @@ public void close() { private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -363,8 +426,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = 
request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index 6b51c883623c..ba0c0d5306fd 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -13,6 +13,8 @@ import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.sync.RequestBody; @@ -27,6 +29,8 @@ import software.amazon.awssdk.services.query.model.APostOperationWithOutputRequest; import software.amazon.awssdk.services.query.model.APostOperationWithOutputResponse; import software.amazon.awssdk.services.query.model.InvalidInputException; +import software.amazon.awssdk.services.query.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.query.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.query.model.QueryException; import software.amazon.awssdk.services.query.model.StreamingInputOperationRequest; import software.amazon.awssdk.services.query.model.StreamingInputOperationResponse; @@ -34,6 +38,7 @@ import software.amazon.awssdk.services.query.model.StreamingOutputOperationResponse; import software.amazon.awssdk.services.query.transform.APostOperationRequestMarshaller; import software.amazon.awssdk.services.query.transform.APostOperationWithOutputRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingOutputOperationRequestMarshaller; import software.amazon.awssdk.services.query.waiters.QueryWaiter; @@ -88,17 +93,16 @@ public final String serviceName() { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, QueryException { - + AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector 
apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); @@ -106,10 +110,10 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio String resolvedHostExpression = "foo-"; return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).hostPrefixExpression(resolvedHostExpression) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -137,27 +141,75 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, QueryException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + /** + * Invokes the OperationWithChecksumRequired operation. 
+ * + * @param operationWithChecksumRequiredRequest + * @return Result of the OperationWithChecksumRequired operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws QueryException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample QueryClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + @Override + public OperationWithChecksumRequiredResponse operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, QueryException { + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -171,11 +223,11 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *
      * {@code RequestBody.fromFile(new File("myfile.txt"))}
      * </pre>
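      * <p>
      * Illustrative editorial note (not generated documentation): other {@link RequestBody} factory methods follow
      * the same pattern, for example:
      * <pre>
      * {@code RequestBody.fromString("sample request content")}
      * </pre>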
- * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @return Result of the StreamingInputOperation operation returned by the service. @@ -192,32 +244,32 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -248,34 +300,34 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, QueryException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -291,11 +343,11 @@ private static List resolveMetricPublishers(SdkClientConfigurat private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } @Override @@ -308,4 +360,3 @@ public QueryWaiter waiter() { return QueryWaiter.builder().client(this).build(); } } - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java new file mode 100644 index 000000000000..6a2f904ad448 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -0,0 +1,428 @@ +package software.amazon.awssdk.services.xml; + +import static software.amazon.awssdk.utils.FunctionalUtils.runAndLogError; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.signer.AsyncAws4Signer; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.awscore.client.handler.AwsAsyncClientHandler; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.RequestOverrideConfiguration; +import 
software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.client.handler.AsyncClientHandler; +import software.amazon.awssdk.core.client.handler.ClientExecutionParams; +import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; +import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.NoOpMetricCollector; +import software.amazon.awssdk.protocols.core.ExceptionMetadata; +import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; +import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; +import software.amazon.awssdk.services.xml.model.APostOperationRequest; +import software.amazon.awssdk.services.xml.model.APostOperationResponse; +import software.amazon.awssdk.services.xml.model.APostOperationWithOutputRequest; +import software.amazon.awssdk.services.xml.model.APostOperationWithOutputResponse; +import software.amazon.awssdk.services.xml.model.InvalidInputException; +import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; +import software.amazon.awssdk.services.xml.model.StreamingInputOperationResponse; +import software.amazon.awssdk.services.xml.model.StreamingOutputOperationRequest; +import software.amazon.awssdk.services.xml.model.StreamingOutputOperationResponse; +import software.amazon.awssdk.services.xml.model.XmlException; +import software.amazon.awssdk.services.xml.model.XmlRequest; +import software.amazon.awssdk.services.xml.transform.APostOperationRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.APostOperationWithOutputRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +/** + * Internal implementation of {@link XmlAsyncClient}. 
+ * + * @see XmlAsyncClient#builder() + */ +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +final class DefaultXmlAsyncClient implements XmlAsyncClient { + private static final Logger log = LoggerFactory.getLogger(DefaultXmlAsyncClient.class); + + private final AsyncClientHandler clientHandler; + + private final AwsXmlProtocolFactory protocolFactory; + + private final SdkClientConfiguration clientConfiguration; + + protected DefaultXmlAsyncClient(SdkClientConfiguration clientConfiguration) { + this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); + this.clientConfiguration = clientConfiguration; + this.protocolFactory = init(); + } + + @Override + public final String serviceName() { + return SERVICE_NAME; + } + + /** + *

+ * Performs a post operation to the xml service and has no output
+ *
+ * @param aPostOperationRequest
+ * @return A Java Future containing the result of the APostOperation operation returned by the service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
+ *         <ul>
+ *         <li>InvalidInputException The request was rejected because an invalid or out-of-range value was supplied
+ *         for an input parameter.</li>
+ *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ *         Can be used for catch all scenarios.</li>
+ *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ *         credentials, etc.</li>
+ *         <li>XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ *         of this type.</li>
+ *         </ul>
+ * @sample XmlAsyncClient.APostOperation + * @see AWS API + * Documentation + */ + @Override + public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(APostOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + String hostPrefix = "foo-"; + String resolvedHostExpression = "foo-"; + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("APostOperation") + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression) + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = aPostOperationRequest.overrideConfiguration().orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + + /** + *

+ * Performs a post operation to the xml service and has modelled output
+ *
+ * @param aPostOperationWithOutputRequest
+ * @return A Java Future containing the result of the APostOperationWithOutput operation returned by the service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
+ *         <ul>
+ *         <li>InvalidInputException The request was rejected because an invalid or out-of-range value was supplied
+ *         for an input parameter.</li>
+ *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ *         Can be used for catch all scenarios.</li>
+ *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ *         credentials, etc.</li>
+ *         <li>XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ *         of this type.</li>
+ *         </ul>
+ * @sample XmlAsyncClient.APostOperationWithOutput + * @see AWS API Documentation + */ + @Override + public CompletableFuture aPostOperationWithOutput( + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput") + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = aPostOperationWithOutputRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + + /** + * Invokes the OperationWithChecksumRequired operation asynchronously. + * + * @param operationWithChecksumRequiredRequest + * @return A Java Future containing the result of the OperationWithChecksumRequired operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
+ *         <ul>
+ *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ *         Can be used for catch all scenarios.</li>
+ *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ *         credentials, etc.</li>
+ *         <li>XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ *         of this type.</li>
+ *         </ul>
+ * @sample XmlAsyncClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = operationWithChecksumRequiredRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + + /** + * Some operation with a streaming input + * + * @param streamingInputOperationRequest + * @param requestBody + * Functional interface that can be implemented to produce the request content in a non-blocking manner. The + * size of the content is expected to be known up front. See {@link AsyncRequestBody} for specific details on + * implementing this interface as well as links to precanned implementations for common scenarios like + * uploading from a file. The service documentation for the request content is as follows 'This be a stream' + * @return A Java Future containing the result of the StreamingInputOperation operation returned by the service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
+ *         <ul>
+ *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ *         Can be used for catch all scenarios.</li>
+ *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ *         credentials, etc.</li>
+ *         <li>XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ *         of this type.</li>
+ *         </ul>
+ * @sample XmlAsyncClient.StreamingInputOperation + * @see AWS API Documentation + */ + @Override + public CompletableFuture streamingInputOperation( + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); + if (!isSignerOverridden(clientConfiguration)) { + streamingInputOperationRequest = applySignerOverride(streamingInputOperationRequest, AsyncAws4Signer.create()); + } + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = streamingInputOperationRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + + /** + * Some operation with a streaming output + * + * @param streamingOutputOperationRequest + * @param asyncResponseTransformer + * The response transformer for processing the streaming response in a non-blocking manner. See + * {@link AsyncResponseTransformer} for details on how this callback should be implemented and for links to + * precanned implementations for common scenarios like downloading to a file. The service documentation for + * the response content is as follows 'This be a stream'. + * @return A future to the transformed result of the AsyncResponseTransformer.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
+ *         <ul>
+ *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ *         Can be used for catch all scenarios.</li>
+ *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ *         credentials, etc.</li>
+ *         <li>XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ *         of this type.</li>
+ *         </ul>
+ * @sample XmlAsyncClient.StreamingOutputOperation + * @see AWS API Documentation + */ + @Override + public CompletableFuture streamingOutputOperation( + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + + CompletableFuture executeFuture = clientHandler.execute( + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation") + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); + AwsRequestOverrideConfiguration requestOverrideConfig = streamingOutputOperationRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + if (e != null) { + runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", + () -> asyncResponseTransformer.exceptionOccurred(e)); + } + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch (Throwable t) { + runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", + () -> asyncResponseTransformer.exceptionOccurred(t)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + + @Override + public void close() { + clientHandler.close(); + } + + private AwsXmlProtocolFactory init() { + return AwsXmlProtocolFactory + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + } + + private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; + } + + private T applySignerOverride(T request, Signer signer) { + if (request.overrideConfiguration().flatMap(c -> c.signer()).isPresent()) { + return request; + } + Consumer signerOverride = b -> b.signer(signer).build(); + AwsRequestOverrideConfiguration 
overrideConfiguration = request.overrideConfiguration() + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); + } + + private static boolean isSignerOverridden(SdkClientConfiguration clientConfiguration) { + return Boolean.TRUE.equals(clientConfiguration.option(SdkClientOption.SIGNER_OVERRIDDEN)); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java new file mode 100644 index 000000000000..05778de0bc9c --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -0,0 +1,349 @@ +package software.amazon.awssdk.services.xml; + +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; +import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.RequestOverrideConfiguration; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.client.handler.ClientExecutionParams; +import software.amazon.awssdk.core.client.handler.SyncClientHandler; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.NoOpMetricCollector; +import software.amazon.awssdk.protocols.core.ExceptionMetadata; +import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; +import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; +import software.amazon.awssdk.services.xml.model.APostOperationRequest; +import software.amazon.awssdk.services.xml.model.APostOperationResponse; +import software.amazon.awssdk.services.xml.model.APostOperationWithOutputRequest; +import software.amazon.awssdk.services.xml.model.APostOperationWithOutputResponse; +import software.amazon.awssdk.services.xml.model.InvalidInputException; +import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredRequest; +import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; +import software.amazon.awssdk.services.xml.model.StreamingInputOperationResponse; +import software.amazon.awssdk.services.xml.model.StreamingOutputOperationRequest; +import software.amazon.awssdk.services.xml.model.StreamingOutputOperationResponse; +import software.amazon.awssdk.services.xml.model.XmlException; +import 
software.amazon.awssdk.services.xml.transform.APostOperationRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.APostOperationWithOutputRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; +import software.amazon.awssdk.utils.Logger; + +/** + * Internal implementation of {@link XmlClient}. + * + * @see XmlClient#builder() + */ +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +final class DefaultXmlClient implements XmlClient { + private static final Logger log = Logger.loggerFor(DefaultXmlClient.class); + + private final SyncClientHandler clientHandler; + + private final AwsXmlProtocolFactory protocolFactory; + + private final SdkClientConfiguration clientConfiguration; + + protected DefaultXmlClient(SdkClientConfiguration clientConfiguration) { + this.clientHandler = new AwsSyncClientHandler(clientConfiguration); + this.clientConfiguration = clientConfiguration; + this.protocolFactory = init(); + } + + @Override + public final String serviceName() { + return SERVICE_NAME; + } + + /** + *

+ * Performs a post operation to the xml service and has no output
+ * + * @param aPostOperationRequest + * @return Result of the APostOperation operation returned by the service. + * @throws InvalidInputException + * The request was rejected because an invalid or out-of-range value was supplied for an input parameter. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample XmlClient.APostOperation + * @see AWS API + * Documentation + */ + @Override + public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, + AwsServiceException, SdkClientException, XmlException { + + HttpResponseHandler> responseHandler = protocolFactory.createCombinedResponseHandler( + APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); + String hostPrefix = "foo-"; + String resolvedHostExpression = "foo-"; + + return clientHandler.execute(new ClientExecutionParams() + .withOperationName("APostOperation").withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + /** + *

+ * Performs a post operation to the xml service and has modelled output
+ * + * @param aPostOperationWithOutputRequest + * @return Result of the APostOperationWithOutput operation returned by the service. + * @throws InvalidInputException + * The request was rejected because an invalid or out-of-range value was supplied for an input parameter. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample XmlClient.APostOperationWithOutput + * @see AWS API Documentation + */ + @Override + public APostOperationWithOutputResponse aPostOperationWithOutput( + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, XmlException { + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationWithOutputRequest) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + /** + * Invokes the OperationWithChecksumRequired operation. + * + * @param operationWithChecksumRequiredRequest + * @return Result of the OperationWithChecksumRequired operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample XmlClient.OperationWithChecksumRequired + * @see AWS API Documentation + */ + @Override + public OperationWithChecksumRequiredResponse operationWithChecksumRequired( + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, XmlException { + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withInput(operationWithChecksumRequiredRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + /** + * Some operation with a streaming input + * + * @param streamingInputOperationRequest + * @param requestBody + * The content to send to the service. A {@link RequestBody} can be created using one of several factory + * methods for various sources of data. For example, to create a request body from a file you can do the + * following. + * + *
+     * <pre>
+     * {@code RequestBody.fromFile(new File("myfile.txt"))}
+     * </pre>
+ * + * See documentation in {@link RequestBody} for additional details and which sources of data are supported. + * The service documentation for the request content is as follows 'This be a stream' + * @return Result of the StreamingInputOperation operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample XmlClient.StreamingInputOperation + * @see AWS API Documentation + */ + @Override + public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, + RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withInput(streamingInputOperationRequest) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + /** + * Some operation with a streaming output + * + * @param streamingOutputOperationRequest + * @param responseTransformer + * Functional interface for processing the streamed response content. The unmarshalled + * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters to + * the callback. The callback may return a transformed type which will be the return value of this method. + * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this + * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The + * service documentation for the response content is as follows 'This be a stream'. + * @return The transformed result of the ResponseTransformer. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample XmlClient.StreamingOutputOperation + * @see AWS API Documentation + */ + @Override + public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, XmlException { + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest + .overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); + + return clientHandler.execute( + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; + } + + private AwsXmlProtocolFactory init() { + return AwsXmlProtocolFactory + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + } + + @Override + public void close() { + clientHandler.close(); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/eventstream/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/eventstream/customization.config new file mode 100644 index 000000000000..21b15d9542cb --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/eventstream/customization.config @@ -0,0 +1,3 @@ +{ + "underscoresInNameBehavior": "ALLOW" +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/eventstream/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/eventstream/service-2.json index 4b862fd96839..9d013117fcad 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/eventstream/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/eventstream/service-2.json @@ -404,16 +404,16 @@ "EventOne": { "shape": "EventOne" }, - "event-two": { + "event_two": { "shape": "EventTwo" }, "secondEventOne": { "shape": "EventOne" }, - 
"second-event-two": { + "second_event_two": { "shape": "EventTwo" }, - "third-event-two-customizedVisitMethod": { + "third_event_two_customizedVisitMethod": { "shape": "EventTwo" } }, diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java index 07aaee1f7d33..98e800319039 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java @@ -389,6 +389,13 @@ SdkField. builder(MarshallingType.SDK_BYTES) .memberName("EnumType").getter(getter(AllTypesRequest::enumTypeAsString)).setter(setter(Builder::enumType)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("EnumType").build()).build(); + private static final SdkField UNDERSCORE_NAME_TYPE_FIELD = SdkField + . builder(MarshallingType.SDK_POJO).memberName("Underscore_Name_Type") + .getter(getter(AllTypesRequest::underscore_Name_Type)).setter(setter(Builder::underscore_Name_Type)) + .constructor(Underscore_Name_Type::builder) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Underscore_Name_Type").build()) + .build(); + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(STRING_MEMBER_FIELD, INTEGER_MEMBER_FIELD, BOOLEAN_MEMBER_FIELD, FLOAT_MEMBER_FIELD, DOUBLE_MEMBER_FIELD, LONG_MEMBER_FIELD, SIMPLE_LIST_FIELD, LIST_OF_ENUMS_FIELD, LIST_OF_MAPS_FIELD, LIST_OF_STRUCTS_FIELD, @@ -397,7 +404,8 @@ SdkField. builder(MarshallingType.SDK_BYTES) MAP_OF_STRING_TO_ENUM_FIELD, MAP_OF_ENUM_TO_SIMPLE_STRUCT_FIELD, MAP_OF_ENUM_TO_LIST_OF_ENUMS_FIELD, MAP_OF_ENUM_TO_MAP_OF_STRING_TO_ENUM_FIELD, TIMESTAMP_MEMBER_FIELD, STRUCT_WITH_NESTED_TIMESTAMP_MEMBER_FIELD, BLOB_ARG_FIELD, STRUCT_WITH_NESTED_BLOB_FIELD, BLOB_MAP_FIELD, LIST_OF_BLOBS_FIELD, RECURSIVE_STRUCT_FIELD, - POLYMORPHIC_TYPE_WITH_SUB_TYPES_FIELD, POLYMORPHIC_TYPE_WITHOUT_SUB_TYPES_FIELD, ENUM_TYPE_FIELD)); + POLYMORPHIC_TYPE_WITH_SUB_TYPES_FIELD, POLYMORPHIC_TYPE_WITHOUT_SUB_TYPES_FIELD, ENUM_TYPE_FIELD, + UNDERSCORE_NAME_TYPE_FIELD)); private final String stringMember; @@ -459,6 +467,8 @@ SdkField. builder(MarshallingType.SDK_BYTES) private final String enumType; + private final Underscore_Name_Type underscore_Name_Type; + private AllTypesRequest(BuilderImpl builder) { super(builder); this.stringMember = builder.stringMember; @@ -491,6 +501,7 @@ private AllTypesRequest(BuilderImpl builder) { this.polymorphicTypeWithSubTypes = builder.polymorphicTypeWithSubTypes; this.polymorphicTypeWithoutSubTypes = builder.polymorphicTypeWithoutSubTypes; this.enumType = builder.enumType; + this.underscore_Name_Type = builder.underscore_Name_Type; } /** @@ -1138,6 +1149,15 @@ public final String enumTypeAsString() { return enumType; } + /** + * Returns the value of the Underscore_Name_Type property for this object. + * + * @return The value of the Underscore_Name_Type property for this object. 
+ */ + public final Underscore_Name_Type underscore_Name_Type() { + return underscore_Name_Type; + } + @Override public Builder toBuilder() { return new BuilderImpl(this); @@ -1161,30 +1181,32 @@ public final int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(floatMember()); hashCode = 31 * hashCode + Objects.hashCode(doubleMember()); hashCode = 31 * hashCode + Objects.hashCode(longMember()); - hashCode = 31 * hashCode + Objects.hashCode(simpleList()); - hashCode = 31 * hashCode + Objects.hashCode(listOfEnumsAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(listOfMaps()); - hashCode = 31 * hashCode + Objects.hashCode(listOfStructs()); - hashCode = 31 * hashCode + Objects.hashCode(listOfMapOfEnumToStringAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToIntegerList()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToString()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToSimpleStruct()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToEnumAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToStringAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToEnumAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToSimpleStructAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToListOfEnumsAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToMapOfStringToEnumAsStrings()); + hashCode = 31 * hashCode + Objects.hashCode(hasSimpleList() ? simpleList() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfEnums() ? listOfEnumsAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfMaps() ? listOfMaps() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfStructs() ? listOfStructs() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfMapOfEnumToString() ? listOfMapOfEnumToStringAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToIntegerList() ? mapOfStringToIntegerList() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToString() ? mapOfStringToString() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToSimpleStruct() ? mapOfStringToSimpleStruct() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToEnum() ? mapOfEnumToEnumAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToString() ? mapOfEnumToStringAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToEnum() ? mapOfStringToEnumAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToSimpleStruct() ? mapOfEnumToSimpleStructAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToListOfEnums() ? mapOfEnumToListOfEnumsAsStrings() : null); + hashCode = 31 * hashCode + + Objects.hashCode(hasMapOfEnumToMapOfStringToEnum() ? mapOfEnumToMapOfStringToEnumAsStrings() : null); hashCode = 31 * hashCode + Objects.hashCode(timestampMember()); hashCode = 31 * hashCode + Objects.hashCode(structWithNestedTimestampMember()); hashCode = 31 * hashCode + Objects.hashCode(blobArg()); hashCode = 31 * hashCode + Objects.hashCode(structWithNestedBlob()); - hashCode = 31 * hashCode + Objects.hashCode(blobMap()); - hashCode = 31 * hashCode + Objects.hashCode(listOfBlobs()); + hashCode = 31 * hashCode + Objects.hashCode(hasBlobMap() ? blobMap() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfBlobs() ? 
listOfBlobs() : null); hashCode = 31 * hashCode + Objects.hashCode(recursiveStruct()); hashCode = 31 * hashCode + Objects.hashCode(polymorphicTypeWithSubTypes()); hashCode = 31 * hashCode + Objects.hashCode(polymorphicTypeWithoutSubTypes()); hashCode = 31 * hashCode + Objects.hashCode(enumTypeAsString()); + hashCode = 31 * hashCode + Objects.hashCode(underscore_Name_Type()); return hashCode; } @@ -1208,28 +1230,42 @@ public final boolean equalsBySdkFields(Object obj) { return Objects.equals(stringMember(), other.stringMember()) && Objects.equals(integerMember(), other.integerMember()) && Objects.equals(booleanMember(), other.booleanMember()) && Objects.equals(floatMember(), other.floatMember()) && Objects.equals(doubleMember(), other.doubleMember()) && Objects.equals(longMember(), other.longMember()) - && Objects.equals(simpleList(), other.simpleList()) + && hasSimpleList() == other.hasSimpleList() && Objects.equals(simpleList(), other.simpleList()) + && hasListOfEnums() == other.hasListOfEnums() && Objects.equals(listOfEnumsAsStrings(), other.listOfEnumsAsStrings()) - && Objects.equals(listOfMaps(), other.listOfMaps()) && Objects.equals(listOfStructs(), other.listOfStructs()) + && hasListOfMaps() == other.hasListOfMaps() && Objects.equals(listOfMaps(), other.listOfMaps()) + && hasListOfStructs() == other.hasListOfStructs() && Objects.equals(listOfStructs(), other.listOfStructs()) + && hasListOfMapOfEnumToString() == other.hasListOfMapOfEnumToString() && Objects.equals(listOfMapOfEnumToStringAsStrings(), other.listOfMapOfEnumToStringAsStrings()) + && hasMapOfStringToIntegerList() == other.hasMapOfStringToIntegerList() && Objects.equals(mapOfStringToIntegerList(), other.mapOfStringToIntegerList()) + && hasMapOfStringToString() == other.hasMapOfStringToString() && Objects.equals(mapOfStringToString(), other.mapOfStringToString()) + && hasMapOfStringToSimpleStruct() == other.hasMapOfStringToSimpleStruct() && Objects.equals(mapOfStringToSimpleStruct(), other.mapOfStringToSimpleStruct()) + && hasMapOfEnumToEnum() == other.hasMapOfEnumToEnum() && Objects.equals(mapOfEnumToEnumAsStrings(), other.mapOfEnumToEnumAsStrings()) + && hasMapOfEnumToString() == other.hasMapOfEnumToString() && Objects.equals(mapOfEnumToStringAsStrings(), other.mapOfEnumToStringAsStrings()) + && hasMapOfStringToEnum() == other.hasMapOfStringToEnum() && Objects.equals(mapOfStringToEnumAsStrings(), other.mapOfStringToEnumAsStrings()) + && hasMapOfEnumToSimpleStruct() == other.hasMapOfEnumToSimpleStruct() && Objects.equals(mapOfEnumToSimpleStructAsStrings(), other.mapOfEnumToSimpleStructAsStrings()) + && hasMapOfEnumToListOfEnums() == other.hasMapOfEnumToListOfEnums() && Objects.equals(mapOfEnumToListOfEnumsAsStrings(), other.mapOfEnumToListOfEnumsAsStrings()) + && hasMapOfEnumToMapOfStringToEnum() == other.hasMapOfEnumToMapOfStringToEnum() && Objects.equals(mapOfEnumToMapOfStringToEnumAsStrings(), other.mapOfEnumToMapOfStringToEnumAsStrings()) && Objects.equals(timestampMember(), other.timestampMember()) && Objects.equals(structWithNestedTimestampMember(), other.structWithNestedTimestampMember()) && Objects.equals(blobArg(), other.blobArg()) - && Objects.equals(structWithNestedBlob(), other.structWithNestedBlob()) - && Objects.equals(blobMap(), other.blobMap()) && Objects.equals(listOfBlobs(), other.listOfBlobs()) + && Objects.equals(structWithNestedBlob(), other.structWithNestedBlob()) && hasBlobMap() == other.hasBlobMap() + && Objects.equals(blobMap(), other.blobMap()) && hasListOfBlobs() == other.hasListOfBlobs() + && 
Objects.equals(listOfBlobs(), other.listOfBlobs()) && Objects.equals(recursiveStruct(), other.recursiveStruct()) && Objects.equals(polymorphicTypeWithSubTypes(), other.polymorphicTypeWithSubTypes()) && Objects.equals(polymorphicTypeWithoutSubTypes(), other.polymorphicTypeWithoutSubTypes()) - && Objects.equals(enumTypeAsString(), other.enumTypeAsString()); + && Objects.equals(enumTypeAsString(), other.enumTypeAsString()) + && Objects.equals(underscore_Name_Type(), other.underscore_Name_Type()); } /** @@ -1238,23 +1274,36 @@ public final boolean equalsBySdkFields(Object obj) { */ @Override public final String toString() { - return ToString.builder("AllTypesRequest").add("StringMember", stringMember()).add("IntegerMember", integerMember()) - .add("BooleanMember", booleanMember()).add("FloatMember", floatMember()).add("DoubleMember", doubleMember()) - .add("LongMember", longMember()).add("SimpleList", simpleList()).add("ListOfEnums", listOfEnumsAsStrings()) - .add("ListOfMaps", listOfMaps()).add("ListOfStructs", listOfStructs()) - .add("ListOfMapOfEnumToString", listOfMapOfEnumToStringAsStrings()) - .add("MapOfStringToIntegerList", mapOfStringToIntegerList()).add("MapOfStringToString", mapOfStringToString()) - .add("MapOfStringToSimpleStruct", mapOfStringToSimpleStruct()).add("MapOfEnumToEnum", mapOfEnumToEnumAsStrings()) - .add("MapOfEnumToString", mapOfEnumToStringAsStrings()).add("MapOfStringToEnum", mapOfStringToEnumAsStrings()) - .add("MapOfEnumToSimpleStruct", mapOfEnumToSimpleStructAsStrings()) - .add("MapOfEnumToListOfEnums", mapOfEnumToListOfEnumsAsStrings()) - .add("MapOfEnumToMapOfStringToEnum", mapOfEnumToMapOfStringToEnumAsStrings()) + return ToString + .builder("AllTypesRequest") + .add("StringMember", stringMember()) + .add("IntegerMember", integerMember()) + .add("BooleanMember", booleanMember()) + .add("FloatMember", floatMember()) + .add("DoubleMember", doubleMember()) + .add("LongMember", longMember()) + .add("SimpleList", hasSimpleList() ? simpleList() : null) + .add("ListOfEnums", hasListOfEnums() ? listOfEnumsAsStrings() : null) + .add("ListOfMaps", hasListOfMaps() ? listOfMaps() : null) + .add("ListOfStructs", hasListOfStructs() ? listOfStructs() : null) + .add("ListOfMapOfEnumToString", hasListOfMapOfEnumToString() ? listOfMapOfEnumToStringAsStrings() : null) + .add("MapOfStringToIntegerList", hasMapOfStringToIntegerList() ? mapOfStringToIntegerList() : null) + .add("MapOfStringToString", hasMapOfStringToString() ? mapOfStringToString() : null) + .add("MapOfStringToSimpleStruct", hasMapOfStringToSimpleStruct() ? mapOfStringToSimpleStruct() : null) + .add("MapOfEnumToEnum", hasMapOfEnumToEnum() ? mapOfEnumToEnumAsStrings() : null) + .add("MapOfEnumToString", hasMapOfEnumToString() ? mapOfEnumToStringAsStrings() : null) + .add("MapOfStringToEnum", hasMapOfStringToEnum() ? mapOfStringToEnumAsStrings() : null) + .add("MapOfEnumToSimpleStruct", hasMapOfEnumToSimpleStruct() ? mapOfEnumToSimpleStructAsStrings() : null) + .add("MapOfEnumToListOfEnums", hasMapOfEnumToListOfEnums() ? mapOfEnumToListOfEnumsAsStrings() : null) + .add("MapOfEnumToMapOfStringToEnum", + hasMapOfEnumToMapOfStringToEnum() ? 
mapOfEnumToMapOfStringToEnumAsStrings() : null) .add("TimestampMember", timestampMember()) .add("StructWithNestedTimestampMember", structWithNestedTimestampMember()).add("BlobArg", blobArg()) - .add("StructWithNestedBlob", structWithNestedBlob()).add("BlobMap", blobMap()).add("ListOfBlobs", listOfBlobs()) - .add("RecursiveStruct", recursiveStruct()).add("PolymorphicTypeWithSubTypes", polymorphicTypeWithSubTypes()) + .add("StructWithNestedBlob", structWithNestedBlob()).add("BlobMap", hasBlobMap() ? blobMap() : null) + .add("ListOfBlobs", hasListOfBlobs() ? listOfBlobs() : null).add("RecursiveStruct", recursiveStruct()) + .add("PolymorphicTypeWithSubTypes", polymorphicTypeWithSubTypes()) .add("PolymorphicTypeWithoutSubTypes", polymorphicTypeWithoutSubTypes()).add("EnumType", enumTypeAsString()) - .build(); + .add("Underscore_Name_Type", underscore_Name_Type()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -1319,6 +1368,8 @@ public final Optional getValueForField(String fieldName, Class clazz) return Optional.ofNullable(clazz.cast(polymorphicTypeWithoutSubTypes())); case "EnumType": return Optional.ofNullable(clazz.cast(enumTypeAsString())); + case "Underscore_Name_Type": + return Optional.ofNullable(clazz.cast(underscore_Name_Type())); default: return Optional.empty(); } @@ -1854,6 +1905,33 @@ default Builder polymorphicTypeWithoutSubTypes(Consumer poly */ Builder enumType(EnumType enumType); + /** + * Sets the value of the Underscore_Name_Type property for this object. + * + * @param underscore_Name_Type + * The new value for the Underscore_Name_Type property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + */ + Builder underscore_Name_Type(Underscore_Name_Type underscore_Name_Type); + + /** + * Sets the value of the Underscore_Name_Type property for this object. + * + * This is a convenience that creates an instance of the {@link Underscore_Name_Type.Builder} avoiding the need + * to create one manually via {@link Underscore_Name_Type#builder()}. + * + * When the {@link Consumer} completes, {@link Underscore_Name_Type.Builder#build()} is called immediately and + * its result is passed to {@link #underscore_Name_Type(Underscore_Name_Type)}. + * + * @param underscore_Name_Type + * a consumer that will call methods on {@link Underscore_Name_Type.Builder} + * @return Returns a reference to this object so that method calls can be chained together. 
+ * @see #underscore_Name_Type(Underscore_Name_Type) + */ + default Builder underscore_Name_Type(Consumer underscore_Name_Type) { + return underscore_Name_Type(Underscore_Name_Type.builder().applyMutation(underscore_Name_Type).build()); + } + @Override Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); @@ -1922,6 +2000,8 @@ static final class BuilderImpl extends JsonProtocolTestsRequest.BuilderImpl impl private String enumType; + private Underscore_Name_Type underscore_Name_Type; + private BuilderImpl() { } @@ -1957,6 +2037,7 @@ private BuilderImpl(AllTypesRequest model) { polymorphicTypeWithSubTypes(model.polymorphicTypeWithSubTypes); polymorphicTypeWithoutSubTypes(model.polymorphicTypeWithoutSubTypes); enumType(model.enumType); + underscore_Name_Type(model.underscore_Name_Type); } public final String getStringMember() { @@ -2044,6 +2125,9 @@ public final void setLongMember(Long longMember) { } public final Collection getSimpleList() { + if (simpleList instanceof SdkAutoConstructList) { + return null; + } return simpleList; } @@ -2065,6 +2149,9 @@ public final void setSimpleList(Collection simpleList) { } public final Collection getListOfEnums() { + if (listOfEnums instanceof SdkAutoConstructList) { + return null; + } return listOfEnums; } @@ -2099,6 +2186,9 @@ public final void setListOfEnums(Collection listOfEnums) { } public final Collection> getListOfMaps() { + if (listOfMaps instanceof SdkAutoConstructList) { + return null; + } return listOfMaps; } @@ -2120,6 +2210,9 @@ public final void setListOfMaps(Collection> listOf } public final Collection getListOfStructs() { + if (listOfStructs instanceof SdkAutoConstructList) { + return null; + } return listOfStructs != null ? listOfStructs.stream().map(SimpleStruct::toBuilder).collect(Collectors.toList()) : null; } @@ -2150,6 +2243,9 @@ public final void setListOfStructs(Collection listOfSt } public final Collection> getListOfMapOfEnumToString() { + if (listOfMapOfEnumToString instanceof SdkAutoConstructList) { + return null; + } return listOfMapOfEnumToString; } @@ -2171,6 +2267,9 @@ public final void setListOfMapOfEnumToString(Collection> getMapOfStringToIntegerList() { + if (mapOfStringToIntegerList instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToIntegerList; } @@ -2185,6 +2284,9 @@ public final void setMapOfStringToIntegerList(Map getMapOfStringToString() { + if (mapOfStringToString instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToString; } @@ -2199,6 +2301,9 @@ public final void setMapOfStringToString(Map mapOfStringToString } public final Map getMapOfStringToSimpleStruct() { + if (mapOfStringToSimpleStruct instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToSimpleStruct != null ? 
CollectionUtils.mapValues(mapOfStringToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2214,6 +2319,9 @@ public final void setMapOfStringToSimpleStruct(Map getMapOfEnumToEnum() { + if (mapOfEnumToEnum instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToEnum; } @@ -2234,6 +2342,9 @@ public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { } public final Map getMapOfEnumToString() { + if (mapOfEnumToString instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToString; } @@ -2254,6 +2365,9 @@ public final void setMapOfEnumToString(Map mapOfEnumToString) { } public final Map getMapOfStringToEnum() { + if (mapOfStringToEnum instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToEnum; } @@ -2274,6 +2388,9 @@ public final void setMapOfStringToEnum(Map mapOfStringToEnum) { } public final Map getMapOfEnumToSimpleStruct() { + if (mapOfEnumToSimpleStruct instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToSimpleStruct != null ? CollectionUtils.mapValues(mapOfEnumToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2295,6 +2412,9 @@ public final void setMapOfEnumToSimpleStruct(Map> getMapOfEnumToListOfEnums() { + if (mapOfEnumToListOfEnums instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToListOfEnums; } @@ -2315,6 +2435,9 @@ public final void setMapOfEnumToListOfEnums(Map> getMapOfEnumToMapOfStringToEnum() { + if (mapOfEnumToMapOfStringToEnum instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToMapOfStringToEnum; } @@ -2392,6 +2515,9 @@ public final void setStructWithNestedBlob(StructWithNestedBlobType.BuilderImpl s } public final Map getBlobMap() { + if (blobMap instanceof SdkAutoConstructMap) { + return null; + } return blobMap == null ? null : blobMap.entrySet().stream() .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().asByteBuffer())); } @@ -2408,6 +2534,9 @@ public final void setBlobMap(Map blobMap) { } public final List getListOfBlobs() { + if (listOfBlobs instanceof SdkAutoConstructList) { + return null; + } return listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::asByteBuffer).collect(Collectors.toList()); } @@ -2492,6 +2621,20 @@ public final void setEnumType(String enumType) { this.enumType = enumType; } + public final Underscore_Name_Type.Builder getUnderscore_Name_Type() { + return underscore_Name_Type != null ? underscore_Name_Type.toBuilder() : null; + } + + @Override + public final Builder underscore_Name_Type(Underscore_Name_Type underscore_Name_Type) { + this.underscore_Name_Type = underscore_Name_Type; + return this; + } + + public final void setUnderscore_Name_Type(Underscore_Name_Type.BuilderImpl underscore_Name_Type) { + this.underscore_Name_Type = underscore_Name_Type != null ? underscore_Name_Type.build() : null; + } + @Override public Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration) { super.overrideConfiguration(overrideConfiguration); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java index 891301163321..4ff4ae49b9f5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java @@ -388,6 +388,13 @@ SdkField. 
builder(MarshallingType.SDK_BYTES) .memberName("EnumType").getter(getter(AllTypesResponse::enumTypeAsString)).setter(setter(Builder::enumType)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("EnumType").build()).build(); + private static final SdkField UNDERSCORE_NAME_TYPE_FIELD = SdkField + . builder(MarshallingType.SDK_POJO).memberName("Underscore_Name_Type") + .getter(getter(AllTypesResponse::underscore_Name_Type)).setter(setter(Builder::underscore_Name_Type)) + .constructor(Underscore_Name_Type::builder) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Underscore_Name_Type").build()) + .build(); + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(STRING_MEMBER_FIELD, INTEGER_MEMBER_FIELD, BOOLEAN_MEMBER_FIELD, FLOAT_MEMBER_FIELD, DOUBLE_MEMBER_FIELD, LONG_MEMBER_FIELD, SIMPLE_LIST_FIELD, LIST_OF_ENUMS_FIELD, LIST_OF_MAPS_FIELD, LIST_OF_STRUCTS_FIELD, @@ -396,7 +403,8 @@ SdkField. builder(MarshallingType.SDK_BYTES) MAP_OF_STRING_TO_ENUM_FIELD, MAP_OF_ENUM_TO_SIMPLE_STRUCT_FIELD, MAP_OF_ENUM_TO_LIST_OF_ENUMS_FIELD, MAP_OF_ENUM_TO_MAP_OF_STRING_TO_ENUM_FIELD, TIMESTAMP_MEMBER_FIELD, STRUCT_WITH_NESTED_TIMESTAMP_MEMBER_FIELD, BLOB_ARG_FIELD, STRUCT_WITH_NESTED_BLOB_FIELD, BLOB_MAP_FIELD, LIST_OF_BLOBS_FIELD, RECURSIVE_STRUCT_FIELD, - POLYMORPHIC_TYPE_WITH_SUB_TYPES_FIELD, POLYMORPHIC_TYPE_WITHOUT_SUB_TYPES_FIELD, ENUM_TYPE_FIELD)); + POLYMORPHIC_TYPE_WITH_SUB_TYPES_FIELD, POLYMORPHIC_TYPE_WITHOUT_SUB_TYPES_FIELD, ENUM_TYPE_FIELD, + UNDERSCORE_NAME_TYPE_FIELD)); private final String stringMember; @@ -458,6 +466,8 @@ SdkField. builder(MarshallingType.SDK_BYTES) private final String enumType; + private final Underscore_Name_Type underscore_Name_Type; + private AllTypesResponse(BuilderImpl builder) { super(builder); this.stringMember = builder.stringMember; @@ -490,6 +500,7 @@ private AllTypesResponse(BuilderImpl builder) { this.polymorphicTypeWithSubTypes = builder.polymorphicTypeWithSubTypes; this.polymorphicTypeWithoutSubTypes = builder.polymorphicTypeWithoutSubTypes; this.enumType = builder.enumType; + this.underscore_Name_Type = builder.underscore_Name_Type; } /** @@ -1137,6 +1148,15 @@ public final String enumTypeAsString() { return enumType; } + /** + * Returns the value of the Underscore_Name_Type property for this object. + * + * @return The value of the Underscore_Name_Type property for this object. 
+ */ + public final Underscore_Name_Type underscore_Name_Type() { + return underscore_Name_Type; + } + @Override public Builder toBuilder() { return new BuilderImpl(this); @@ -1160,30 +1180,32 @@ public final int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(floatMember()); hashCode = 31 * hashCode + Objects.hashCode(doubleMember()); hashCode = 31 * hashCode + Objects.hashCode(longMember()); - hashCode = 31 * hashCode + Objects.hashCode(simpleList()); - hashCode = 31 * hashCode + Objects.hashCode(listOfEnumsAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(listOfMaps()); - hashCode = 31 * hashCode + Objects.hashCode(listOfStructs()); - hashCode = 31 * hashCode + Objects.hashCode(listOfMapOfEnumToStringAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToIntegerList()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToString()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToSimpleStruct()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToEnumAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToStringAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToEnumAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToSimpleStructAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToListOfEnumsAsStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfEnumToMapOfStringToEnumAsStrings()); + hashCode = 31 * hashCode + Objects.hashCode(hasSimpleList() ? simpleList() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfEnums() ? listOfEnumsAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfMaps() ? listOfMaps() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfStructs() ? listOfStructs() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfMapOfEnumToString() ? listOfMapOfEnumToStringAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToIntegerList() ? mapOfStringToIntegerList() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToString() ? mapOfStringToString() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToSimpleStruct() ? mapOfStringToSimpleStruct() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToEnum() ? mapOfEnumToEnumAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToString() ? mapOfEnumToStringAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfStringToEnum() ? mapOfStringToEnumAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToSimpleStruct() ? mapOfEnumToSimpleStructAsStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasMapOfEnumToListOfEnums() ? mapOfEnumToListOfEnumsAsStrings() : null); + hashCode = 31 * hashCode + + Objects.hashCode(hasMapOfEnumToMapOfStringToEnum() ? mapOfEnumToMapOfStringToEnumAsStrings() : null); hashCode = 31 * hashCode + Objects.hashCode(timestampMember()); hashCode = 31 * hashCode + Objects.hashCode(structWithNestedTimestampMember()); hashCode = 31 * hashCode + Objects.hashCode(blobArg()); hashCode = 31 * hashCode + Objects.hashCode(structWithNestedBlob()); - hashCode = 31 * hashCode + Objects.hashCode(blobMap()); - hashCode = 31 * hashCode + Objects.hashCode(listOfBlobs()); + hashCode = 31 * hashCode + Objects.hashCode(hasBlobMap() ? blobMap() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfBlobs() ? 
listOfBlobs() : null); hashCode = 31 * hashCode + Objects.hashCode(recursiveStruct()); hashCode = 31 * hashCode + Objects.hashCode(polymorphicTypeWithSubTypes()); hashCode = 31 * hashCode + Objects.hashCode(polymorphicTypeWithoutSubTypes()); hashCode = 31 * hashCode + Objects.hashCode(enumTypeAsString()); + hashCode = 31 * hashCode + Objects.hashCode(underscore_Name_Type()); return hashCode; } @@ -1207,28 +1229,42 @@ public final boolean equalsBySdkFields(Object obj) { return Objects.equals(stringMember(), other.stringMember()) && Objects.equals(integerMember(), other.integerMember()) && Objects.equals(booleanMember(), other.booleanMember()) && Objects.equals(floatMember(), other.floatMember()) && Objects.equals(doubleMember(), other.doubleMember()) && Objects.equals(longMember(), other.longMember()) - && Objects.equals(simpleList(), other.simpleList()) + && hasSimpleList() == other.hasSimpleList() && Objects.equals(simpleList(), other.simpleList()) + && hasListOfEnums() == other.hasListOfEnums() && Objects.equals(listOfEnumsAsStrings(), other.listOfEnumsAsStrings()) - && Objects.equals(listOfMaps(), other.listOfMaps()) && Objects.equals(listOfStructs(), other.listOfStructs()) + && hasListOfMaps() == other.hasListOfMaps() && Objects.equals(listOfMaps(), other.listOfMaps()) + && hasListOfStructs() == other.hasListOfStructs() && Objects.equals(listOfStructs(), other.listOfStructs()) + && hasListOfMapOfEnumToString() == other.hasListOfMapOfEnumToString() && Objects.equals(listOfMapOfEnumToStringAsStrings(), other.listOfMapOfEnumToStringAsStrings()) + && hasMapOfStringToIntegerList() == other.hasMapOfStringToIntegerList() && Objects.equals(mapOfStringToIntegerList(), other.mapOfStringToIntegerList()) + && hasMapOfStringToString() == other.hasMapOfStringToString() && Objects.equals(mapOfStringToString(), other.mapOfStringToString()) + && hasMapOfStringToSimpleStruct() == other.hasMapOfStringToSimpleStruct() && Objects.equals(mapOfStringToSimpleStruct(), other.mapOfStringToSimpleStruct()) + && hasMapOfEnumToEnum() == other.hasMapOfEnumToEnum() && Objects.equals(mapOfEnumToEnumAsStrings(), other.mapOfEnumToEnumAsStrings()) + && hasMapOfEnumToString() == other.hasMapOfEnumToString() && Objects.equals(mapOfEnumToStringAsStrings(), other.mapOfEnumToStringAsStrings()) + && hasMapOfStringToEnum() == other.hasMapOfStringToEnum() && Objects.equals(mapOfStringToEnumAsStrings(), other.mapOfStringToEnumAsStrings()) + && hasMapOfEnumToSimpleStruct() == other.hasMapOfEnumToSimpleStruct() && Objects.equals(mapOfEnumToSimpleStructAsStrings(), other.mapOfEnumToSimpleStructAsStrings()) + && hasMapOfEnumToListOfEnums() == other.hasMapOfEnumToListOfEnums() && Objects.equals(mapOfEnumToListOfEnumsAsStrings(), other.mapOfEnumToListOfEnumsAsStrings()) + && hasMapOfEnumToMapOfStringToEnum() == other.hasMapOfEnumToMapOfStringToEnum() && Objects.equals(mapOfEnumToMapOfStringToEnumAsStrings(), other.mapOfEnumToMapOfStringToEnumAsStrings()) && Objects.equals(timestampMember(), other.timestampMember()) && Objects.equals(structWithNestedTimestampMember(), other.structWithNestedTimestampMember()) && Objects.equals(blobArg(), other.blobArg()) - && Objects.equals(structWithNestedBlob(), other.structWithNestedBlob()) - && Objects.equals(blobMap(), other.blobMap()) && Objects.equals(listOfBlobs(), other.listOfBlobs()) + && Objects.equals(structWithNestedBlob(), other.structWithNestedBlob()) && hasBlobMap() == other.hasBlobMap() + && Objects.equals(blobMap(), other.blobMap()) && hasListOfBlobs() == other.hasListOfBlobs() + && 
Objects.equals(listOfBlobs(), other.listOfBlobs()) && Objects.equals(recursiveStruct(), other.recursiveStruct()) && Objects.equals(polymorphicTypeWithSubTypes(), other.polymorphicTypeWithSubTypes()) && Objects.equals(polymorphicTypeWithoutSubTypes(), other.polymorphicTypeWithoutSubTypes()) - && Objects.equals(enumTypeAsString(), other.enumTypeAsString()); + && Objects.equals(enumTypeAsString(), other.enumTypeAsString()) + && Objects.equals(underscore_Name_Type(), other.underscore_Name_Type()); } /** @@ -1237,23 +1273,36 @@ public final boolean equalsBySdkFields(Object obj) { */ @Override public final String toString() { - return ToString.builder("AllTypesResponse").add("StringMember", stringMember()).add("IntegerMember", integerMember()) - .add("BooleanMember", booleanMember()).add("FloatMember", floatMember()).add("DoubleMember", doubleMember()) - .add("LongMember", longMember()).add("SimpleList", simpleList()).add("ListOfEnums", listOfEnumsAsStrings()) - .add("ListOfMaps", listOfMaps()).add("ListOfStructs", listOfStructs()) - .add("ListOfMapOfEnumToString", listOfMapOfEnumToStringAsStrings()) - .add("MapOfStringToIntegerList", mapOfStringToIntegerList()).add("MapOfStringToString", mapOfStringToString()) - .add("MapOfStringToSimpleStruct", mapOfStringToSimpleStruct()).add("MapOfEnumToEnum", mapOfEnumToEnumAsStrings()) - .add("MapOfEnumToString", mapOfEnumToStringAsStrings()).add("MapOfStringToEnum", mapOfStringToEnumAsStrings()) - .add("MapOfEnumToSimpleStruct", mapOfEnumToSimpleStructAsStrings()) - .add("MapOfEnumToListOfEnums", mapOfEnumToListOfEnumsAsStrings()) - .add("MapOfEnumToMapOfStringToEnum", mapOfEnumToMapOfStringToEnumAsStrings()) + return ToString + .builder("AllTypesResponse") + .add("StringMember", stringMember()) + .add("IntegerMember", integerMember()) + .add("BooleanMember", booleanMember()) + .add("FloatMember", floatMember()) + .add("DoubleMember", doubleMember()) + .add("LongMember", longMember()) + .add("SimpleList", hasSimpleList() ? simpleList() : null) + .add("ListOfEnums", hasListOfEnums() ? listOfEnumsAsStrings() : null) + .add("ListOfMaps", hasListOfMaps() ? listOfMaps() : null) + .add("ListOfStructs", hasListOfStructs() ? listOfStructs() : null) + .add("ListOfMapOfEnumToString", hasListOfMapOfEnumToString() ? listOfMapOfEnumToStringAsStrings() : null) + .add("MapOfStringToIntegerList", hasMapOfStringToIntegerList() ? mapOfStringToIntegerList() : null) + .add("MapOfStringToString", hasMapOfStringToString() ? mapOfStringToString() : null) + .add("MapOfStringToSimpleStruct", hasMapOfStringToSimpleStruct() ? mapOfStringToSimpleStruct() : null) + .add("MapOfEnumToEnum", hasMapOfEnumToEnum() ? mapOfEnumToEnumAsStrings() : null) + .add("MapOfEnumToString", hasMapOfEnumToString() ? mapOfEnumToStringAsStrings() : null) + .add("MapOfStringToEnum", hasMapOfStringToEnum() ? mapOfStringToEnumAsStrings() : null) + .add("MapOfEnumToSimpleStruct", hasMapOfEnumToSimpleStruct() ? mapOfEnumToSimpleStructAsStrings() : null) + .add("MapOfEnumToListOfEnums", hasMapOfEnumToListOfEnums() ? mapOfEnumToListOfEnumsAsStrings() : null) + .add("MapOfEnumToMapOfStringToEnum", + hasMapOfEnumToMapOfStringToEnum() ? 
mapOfEnumToMapOfStringToEnumAsStrings() : null) .add("TimestampMember", timestampMember()) .add("StructWithNestedTimestampMember", structWithNestedTimestampMember()).add("BlobArg", blobArg()) - .add("StructWithNestedBlob", structWithNestedBlob()).add("BlobMap", blobMap()).add("ListOfBlobs", listOfBlobs()) - .add("RecursiveStruct", recursiveStruct()).add("PolymorphicTypeWithSubTypes", polymorphicTypeWithSubTypes()) + .add("StructWithNestedBlob", structWithNestedBlob()).add("BlobMap", hasBlobMap() ? blobMap() : null) + .add("ListOfBlobs", hasListOfBlobs() ? listOfBlobs() : null).add("RecursiveStruct", recursiveStruct()) + .add("PolymorphicTypeWithSubTypes", polymorphicTypeWithSubTypes()) .add("PolymorphicTypeWithoutSubTypes", polymorphicTypeWithoutSubTypes()).add("EnumType", enumTypeAsString()) - .build(); + .add("Underscore_Name_Type", underscore_Name_Type()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -1318,6 +1367,8 @@ public final Optional getValueForField(String fieldName, Class clazz) return Optional.ofNullable(clazz.cast(polymorphicTypeWithoutSubTypes())); case "EnumType": return Optional.ofNullable(clazz.cast(enumTypeAsString())); + case "Underscore_Name_Type": + return Optional.ofNullable(clazz.cast(underscore_Name_Type())); default: return Optional.empty(); } @@ -1852,6 +1903,33 @@ default Builder polymorphicTypeWithoutSubTypes(Consumer poly * @see EnumType */ Builder enumType(EnumType enumType); + + /** + * Sets the value of the Underscore_Name_Type property for this object. + * + * @param underscore_Name_Type + * The new value for the Underscore_Name_Type property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + */ + Builder underscore_Name_Type(Underscore_Name_Type underscore_Name_Type); + + /** + * Sets the value of the Underscore_Name_Type property for this object. + * + * This is a convenience that creates an instance of the {@link Underscore_Name_Type.Builder} avoiding the need + * to create one manually via {@link Underscore_Name_Type#builder()}. + * + * When the {@link Consumer} completes, {@link Underscore_Name_Type.Builder#build()} is called immediately and + * its result is passed to {@link #underscore_Name_Type(Underscore_Name_Type)}. + * + * @param underscore_Name_Type + * a consumer that will call methods on {@link Underscore_Name_Type.Builder} + * @return Returns a reference to this object so that method calls can be chained together. 
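// --- Editor's illustrative sketch (not part of the generated diff above) ---
// The other recurring change in these generated classes is the hasXxx() guard: toString(),
// hashCode() and equals() now treat a member that was never set (internally an
// auto-construct sentinel) as null instead of as an empty collection, so "unset" and
// "explicitly empty" stay distinguishable. A minimal hand-written sketch of that pattern
// for a single list member; the hasSimpleList() body is an assumption about how the
// sentinel is detected, not a quote from the generated source.
import java.util.Collections;
import java.util.List;
import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList;
import software.amazon.awssdk.core.util.SdkAutoConstructList;

final class SimpleListHolderSketch {
    // An unset member holds the shared sentinel rather than null.
    private final List<String> simpleList;

    SimpleListHolderSketch(List<String> simpleList) {
        this.simpleList = simpleList != null
                ? Collections.unmodifiableList(simpleList)
                : DefaultSdkAutoConstructList.<String>getInstance();
    }

    // True only when the caller explicitly set the member, even to an empty list.
    boolean hasSimpleList() {
        return simpleList != null && !(simpleList instanceof SdkAutoConstructList);
    }

    List<String> simpleList() {
        return simpleList;
    }

    @Override
    public String toString() {
        // Same guard the generated toString() applies: an unset member renders as null.
        return "SimpleListHolderSketch(SimpleList=" + (hasSimpleList() ? simpleList() : null) + ")";
    }
}
// --- end of editor's sketch ---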
+ * @see #underscore_Name_Type(Underscore_Name_Type) + */ + default Builder underscore_Name_Type(Consumer underscore_Name_Type) { + return underscore_Name_Type(Underscore_Name_Type.builder().applyMutation(underscore_Name_Type).build()); + } } static final class BuilderImpl extends JsonProtocolTestsResponse.BuilderImpl implements Builder { @@ -1915,6 +1993,8 @@ static final class BuilderImpl extends JsonProtocolTestsResponse.BuilderImpl imp private String enumType; + private Underscore_Name_Type underscore_Name_Type; + private BuilderImpl() { } @@ -1950,6 +2030,7 @@ private BuilderImpl(AllTypesResponse model) { polymorphicTypeWithSubTypes(model.polymorphicTypeWithSubTypes); polymorphicTypeWithoutSubTypes(model.polymorphicTypeWithoutSubTypes); enumType(model.enumType); + underscore_Name_Type(model.underscore_Name_Type); } public final String getStringMember() { @@ -2037,6 +2118,9 @@ public final void setLongMember(Long longMember) { } public final Collection getSimpleList() { + if (simpleList instanceof SdkAutoConstructList) { + return null; + } return simpleList; } @@ -2058,6 +2142,9 @@ public final void setSimpleList(Collection simpleList) { } public final Collection getListOfEnums() { + if (listOfEnums instanceof SdkAutoConstructList) { + return null; + } return listOfEnums; } @@ -2092,6 +2179,9 @@ public final void setListOfEnums(Collection listOfEnums) { } public final Collection> getListOfMaps() { + if (listOfMaps instanceof SdkAutoConstructList) { + return null; + } return listOfMaps; } @@ -2113,6 +2203,9 @@ public final void setListOfMaps(Collection> listOf } public final Collection getListOfStructs() { + if (listOfStructs instanceof SdkAutoConstructList) { + return null; + } return listOfStructs != null ? listOfStructs.stream().map(SimpleStruct::toBuilder).collect(Collectors.toList()) : null; } @@ -2143,6 +2236,9 @@ public final void setListOfStructs(Collection listOfSt } public final Collection> getListOfMapOfEnumToString() { + if (listOfMapOfEnumToString instanceof SdkAutoConstructList) { + return null; + } return listOfMapOfEnumToString; } @@ -2164,6 +2260,9 @@ public final void setListOfMapOfEnumToString(Collection> getMapOfStringToIntegerList() { + if (mapOfStringToIntegerList instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToIntegerList; } @@ -2178,6 +2277,9 @@ public final void setMapOfStringToIntegerList(Map getMapOfStringToString() { + if (mapOfStringToString instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToString; } @@ -2192,6 +2294,9 @@ public final void setMapOfStringToString(Map mapOfStringToString } public final Map getMapOfStringToSimpleStruct() { + if (mapOfStringToSimpleStruct instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToSimpleStruct != null ? 
CollectionUtils.mapValues(mapOfStringToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2207,6 +2312,9 @@ public final void setMapOfStringToSimpleStruct(Map getMapOfEnumToEnum() { + if (mapOfEnumToEnum instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToEnum; } @@ -2227,6 +2335,9 @@ public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { } public final Map getMapOfEnumToString() { + if (mapOfEnumToString instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToString; } @@ -2247,6 +2358,9 @@ public final void setMapOfEnumToString(Map mapOfEnumToString) { } public final Map getMapOfStringToEnum() { + if (mapOfStringToEnum instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToEnum; } @@ -2267,6 +2381,9 @@ public final void setMapOfStringToEnum(Map mapOfStringToEnum) { } public final Map getMapOfEnumToSimpleStruct() { + if (mapOfEnumToSimpleStruct instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToSimpleStruct != null ? CollectionUtils.mapValues(mapOfEnumToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2288,6 +2405,9 @@ public final void setMapOfEnumToSimpleStruct(Map> getMapOfEnumToListOfEnums() { + if (mapOfEnumToListOfEnums instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToListOfEnums; } @@ -2308,6 +2428,9 @@ public final void setMapOfEnumToListOfEnums(Map> getMapOfEnumToMapOfStringToEnum() { + if (mapOfEnumToMapOfStringToEnum instanceof SdkAutoConstructMap) { + return null; + } return mapOfEnumToMapOfStringToEnum; } @@ -2385,6 +2508,9 @@ public final void setStructWithNestedBlob(StructWithNestedBlobType.BuilderImpl s } public final Map getBlobMap() { + if (blobMap instanceof SdkAutoConstructMap) { + return null; + } return blobMap == null ? null : blobMap.entrySet().stream() .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().asByteBuffer())); } @@ -2401,6 +2527,9 @@ public final void setBlobMap(Map blobMap) { } public final List getListOfBlobs() { + if (listOfBlobs instanceof SdkAutoConstructList) { + return null; + } return listOfBlobs == null ? null : listOfBlobs.stream().map(SdkBytes::asByteBuffer).collect(Collectors.toList()); } @@ -2485,6 +2614,20 @@ public final void setEnumType(String enumType) { this.enumType = enumType; } + public final Underscore_Name_Type.Builder getUnderscore_Name_Type() { + return underscore_Name_Type != null ? underscore_Name_Type.toBuilder() : null; + } + + @Override + public final Builder underscore_Name_Type(Underscore_Name_Type underscore_Name_Type) { + this.underscore_Name_Type = underscore_Name_Type; + return this; + } + + public final void setUnderscore_Name_Type(Underscore_Name_Type.BuilderImpl underscore_Name_Type) { + this.underscore_Name_Type = underscore_Name_Type != null ? 
underscore_Name_Type.build() : null; + } + @Override public AllTypesResponse build() { return new AllTypesResponse(this); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config index 089c236467d7..f03277692cc2 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config @@ -19,5 +19,6 @@ } ] } - } + }, + "underscoresInNameBehavior": "ALLOW" } \ No newline at end of file diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config index 47c797cac39f..d3b7f5d49d46 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config @@ -9,5 +9,6 @@ "FOO_ID" : "x-foo-id", "BAR_ID" : "x-bar-id", "REQUEST_ID": "x-foobar-id" - } + }, + "underscoresInNameBehavior": "ALLOW" } \ No newline at end of file diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java index 79e7d5bc52af..54c651084b78 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java @@ -216,10 +216,10 @@ public static Class serializableBuilderClass() { public final int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + super.hashCode(); - hashCode = 31 * hashCode + Objects.hashCode(build()); - hashCode = 31 * hashCode + Objects.hashCode(superValue()); - hashCode = 31 * hashCode + Objects.hashCode(toStringValue()); - hashCode = 31 * hashCode + Objects.hashCode(equalsValue()); + hashCode = 31 * hashCode + Objects.hashCode(hasBuild() ? build() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasSuperValue() ? superValue() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasToStringValue() ? toStringValue() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasEqualsValue() ? 
equalsValue() : null); return hashCode; } @@ -240,8 +240,10 @@ public final boolean equalsBySdkFields(Object obj) { return false; } ExistenceCheckNamingRequest other = (ExistenceCheckNamingRequest) obj; - return Objects.equals(build(), other.build()) && Objects.equals(superValue(), other.superValue()) - && Objects.equals(toStringValue(), other.toStringValue()) && Objects.equals(equalsValue(), other.equalsValue()); + return hasBuild() == other.hasBuild() && Objects.equals(build(), other.build()) + && hasSuperValue() == other.hasSuperValue() && Objects.equals(superValue(), other.superValue()) + && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValue(), other.toStringValue()) + && hasEqualsValue() == other.hasEqualsValue() && Objects.equals(equalsValue(), other.equalsValue()); } /** @@ -250,8 +252,9 @@ public final boolean equalsBySdkFields(Object obj) { */ @Override public final String toString() { - return ToString.builder("ExistenceCheckNamingRequest").add("Build", build()).add("Super", superValue()) - .add("ToString", toStringValue()).add("Equals", equalsValue()).build(); + return ToString.builder("ExistenceCheckNamingRequest").add("Build", hasBuild() ? build() : null) + .add("Super", hasSuperValue() ? superValue() : null).add("ToString", hasToStringValue() ? toStringValue() : null) + .add("Equals", hasEqualsValue() ? equalsValue() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -366,6 +369,9 @@ private BuilderImpl(ExistenceCheckNamingRequest model) { } public final Collection getBuild() { + if (build instanceof SdkAutoConstructList) { + return null; + } return build; } @@ -387,6 +393,9 @@ public final void setBuild(Collection build) { } public final Collection getSuperValue() { + if (superValue instanceof SdkAutoConstructList) { + return null; + } return superValue; } @@ -408,6 +417,9 @@ public final void setSuperValue(Collection superValue) { } public final Map getToStringValue() { + if (toStringValue instanceof SdkAutoConstructMap) { + return null; + } return toStringValue; } @@ -422,6 +434,9 @@ public final void setToStringValue(Map toStringValue) { } public final Map getEqualsValue() { + if (equalsValue instanceof SdkAutoConstructMap) { + return null; + } return equalsValue; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java index 8d7817d17a83..7432ab65d384 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java @@ -214,10 +214,10 @@ public static Class serializableBuilderClass() { public final int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + super.hashCode(); - hashCode = 31 * hashCode + Objects.hashCode(build()); - hashCode = 31 * hashCode + Objects.hashCode(superValue()); - hashCode = 31 * hashCode + Objects.hashCode(toStringValue()); - hashCode = 31 * hashCode + Objects.hashCode(equalsValue()); + hashCode = 31 * hashCode + Objects.hashCode(hasBuild() ? build() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasSuperValue() ? superValue() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasToStringValue() ? toStringValue() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasEqualsValue() ? 
equalsValue() : null); return hashCode; } @@ -238,8 +238,10 @@ public final boolean equalsBySdkFields(Object obj) { return false; } ExistenceCheckNamingResponse other = (ExistenceCheckNamingResponse) obj; - return Objects.equals(build(), other.build()) && Objects.equals(superValue(), other.superValue()) - && Objects.equals(toStringValue(), other.toStringValue()) && Objects.equals(equalsValue(), other.equalsValue()); + return hasBuild() == other.hasBuild() && Objects.equals(build(), other.build()) + && hasSuperValue() == other.hasSuperValue() && Objects.equals(superValue(), other.superValue()) + && hasToStringValue() == other.hasToStringValue() && Objects.equals(toStringValue(), other.toStringValue()) + && hasEqualsValue() == other.hasEqualsValue() && Objects.equals(equalsValue(), other.equalsValue()); } /** @@ -248,8 +250,9 @@ public final boolean equalsBySdkFields(Object obj) { */ @Override public final String toString() { - return ToString.builder("ExistenceCheckNamingResponse").add("Build", build()).add("Super", superValue()) - .add("ToString", toStringValue()).add("Equals", equalsValue()).build(); + return ToString.builder("ExistenceCheckNamingResponse").add("Build", hasBuild() ? build() : null) + .add("Super", hasSuperValue() ? superValue() : null).add("ToString", hasToStringValue() ? toStringValue() : null) + .add("Equals", hasEqualsValue() ? equalsValue() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -358,6 +361,9 @@ private BuilderImpl(ExistenceCheckNamingResponse model) { } public final Collection getBuild() { + if (build instanceof SdkAutoConstructList) { + return null; + } return build; } @@ -379,6 +385,9 @@ public final void setBuild(Collection build) { } public final Collection getSuperValue() { + if (superValue instanceof SdkAutoConstructList) { + return null; + } return superValue; } @@ -400,6 +409,9 @@ public final void setSuperValue(Collection superValue) { } public final Map getToStringValue() { + if (toStringValue instanceof SdkAutoConstructMap) { + return null; + } return toStringValue; } @@ -414,6 +426,9 @@ public final void setToStringValue(Map toStringValue) { } public final Map getEqualsValue() { + if (equalsValue instanceof SdkAutoConstructMap) { + return null; + } return equalsValue; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/listofsimplestructscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/listofsimplestructscopier.java index ba144c1b862d..becb315a4d9a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/listofsimplestructscopier.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/listofsimplestructscopier.java @@ -21,8 +21,8 @@ static List copy(Collection listOfSimpleStructsParam } static List copyFromBuilder(Collection listOfSimpleStructsParam) { - if (listOfSimpleStructsParam == null) { - return null; + if (listOfSimpleStructsParam == null || listOfSimpleStructsParam instanceof DefaultSdkAutoConstructList) { + return DefaultSdkAutoConstructList.getInstance(); } return copy(listOfSimpleStructsParam.stream().map(SimpleStruct.Builder::build).collect(toList())); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofenumtosimplestructcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofenumtosimplestructcopier.java index 3bc4cb33b5f0..e0e532a1de26 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofenumtosimplestructcopier.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofenumtosimplestructcopier.java @@ -16,13 +16,13 @@ static Map copy(Map mapOfEnumToSimpl return DefaultSdkAutoConstructMap.getInstance(); } Map mapOfEnumToSimpleStructParamCopy = mapOfEnumToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); + .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); return Collections.unmodifiableMap(mapOfEnumToSimpleStructParamCopy); } static Map copyFromBuilder(Map mapOfEnumToSimpleStructParam) { - if (mapOfEnumToSimpleStructParam == null) { - return null; + if (mapOfEnumToSimpleStructParam == null || mapOfEnumToSimpleStructParam instanceof DefaultSdkAutoConstructMap) { + return DefaultSdkAutoConstructMap.getInstance(); } return copy(mapOfEnumToSimpleStructParam.entrySet().stream().collect(toMap(Map.Entry::getKey, e -> e.getValue().build()))); } @@ -32,7 +32,7 @@ static Map copyEnumToString(Map ma return DefaultSdkAutoConstructMap.getInstance(); } Map mapOfEnumToSimpleStructParamCopy = mapOfEnumToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey().toString(), e.getValue()), HashMap::putAll); + .collect(HashMap::new, (m, e) -> m.put(e.getKey().toString(), e.getValue()), HashMap::putAll); return Collections.unmodifiableMap(mapOfEnumToSimpleStructParamCopy); } @@ -41,12 +41,12 @@ static Map copyStringToEnum(Map ma return DefaultSdkAutoConstructMap.getInstance(); } Map mapOfEnumToSimpleStructParamCopy = mapOfEnumToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> { - EnumType keyAsEnum = EnumType.fromValue(e.getKey()); - if (keyAsEnum != EnumType.UNKNOWN_TO_SDK_VERSION) { - m.put(keyAsEnum, e.getValue()); - } - }, HashMap::putAll); + .collect(HashMap::new, (m, e) -> { + EnumType keyAsEnum = EnumType.fromValue(e.getKey()); + if (keyAsEnum != EnumType.UNKNOWN_TO_SDK_VERSION) { + m.put(keyAsEnum, e.getValue()); + } + }, HashMap::putAll); return Collections.unmodifiableMap(mapOfEnumToSimpleStructParamCopy); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofstringtosimplestructcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofstringtosimplestructcopier.java index 226288537ce2..50471407921b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofstringtosimplestructcopier.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/mapofstringtosimplestructcopier.java @@ -16,15 +16,15 @@ static Map copy(Map mapOfStringToSim return DefaultSdkAutoConstructMap.getInstance(); } Map mapOfStringToSimpleStructParamCopy = mapOfStringToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); + .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); return Collections.unmodifiableMap(mapOfStringToSimpleStructParamCopy); } static Map copyFromBuilder(Map mapOfStringToSimpleStructParam) { - if (mapOfStringToSimpleStructParam == null) { - return null; + if (mapOfStringToSimpleStructParam == null || mapOfStringToSimpleStructParam instanceof DefaultSdkAutoConstructMap) { + return DefaultSdkAutoConstructMap.getInstance(); } return copy(mapOfStringToSimpleStructParam.entrySet().stream() - .collect(toMap(Map.Entry::getKey, e 
-> e.getValue().build()))); + .collect(toMap(Map.Entry::getKey, e -> e.getValue().build()))); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java index 8f8d6069f700..7b68fc584031 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java @@ -235,9 +235,10 @@ public static Class serializableBuilderClass() { public final int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + super.hashCode(); - hashCode = 31 * hashCode + Objects.hashCode(listOfListOfStrings()); - hashCode = 31 * hashCode + Objects.hashCode(listOfListOfListOfStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToListOfListOfStrings()); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfStrings() ? listOfListOfStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null); + hashCode = 31 * hashCode + + Objects.hashCode(hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null); return hashCode; } @@ -258,8 +259,11 @@ public final boolean equalsBySdkFields(Object obj) { return false; } NestedContainersRequest other = (NestedContainersRequest) obj; - return Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) + return hasListOfListOfStrings() == other.hasListOfListOfStrings() + && Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) + && hasListOfListOfListOfStrings() == other.hasListOfListOfListOfStrings() && Objects.equals(listOfListOfListOfStrings(), other.listOfListOfListOfStrings()) + && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfStrings() && Objects.equals(mapOfStringToListOfListOfStrings(), other.mapOfStringToListOfListOfStrings()); } @@ -269,9 +273,12 @@ public final boolean equalsBySdkFields(Object obj) { */ @Override public final String toString() { - return ToString.builder("NestedContainersRequest").add("ListOfListOfStrings", listOfListOfStrings()) - .add("ListOfListOfListOfStrings", listOfListOfListOfStrings()) - .add("MapOfStringToListOfListOfStrings", mapOfStringToListOfListOfStrings()).build(); + return ToString + .builder("NestedContainersRequest") + .add("ListOfListOfStrings", hasListOfListOfStrings() ? listOfListOfStrings() : null) + .add("ListOfListOfListOfStrings", hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null) + .add("MapOfStringToListOfListOfStrings", + hasMapOfStringToListOfListOfStrings() ? 
mapOfStringToListOfListOfStrings() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -372,6 +379,9 @@ private BuilderImpl(NestedContainersRequest model) { } public final Collection> getListOfListOfStrings() { + if (listOfListOfStrings instanceof SdkAutoConstructList) { + return null; + } return listOfListOfStrings; } @@ -393,6 +403,9 @@ public final void setListOfListOfStrings(Collection } public final Collection>> getListOfListOfListOfStrings() { + if (listOfListOfListOfStrings instanceof SdkAutoConstructList) { + return null; + } return listOfListOfListOfStrings; } @@ -416,6 +429,9 @@ public final void setListOfListOfListOfStrings( } public final Map>> getMapOfStringToListOfListOfStrings() { + if (mapOfStringToListOfListOfStrings instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToListOfListOfStrings; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java index 72dcb9750ad8..6ff648142973 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java @@ -233,9 +233,10 @@ public static Class serializableBuilderClass() { public final int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + super.hashCode(); - hashCode = 31 * hashCode + Objects.hashCode(listOfListOfStrings()); - hashCode = 31 * hashCode + Objects.hashCode(listOfListOfListOfStrings()); - hashCode = 31 * hashCode + Objects.hashCode(mapOfStringToListOfListOfStrings()); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfStrings() ? listOfListOfStrings() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null); + hashCode = 31 * hashCode + + Objects.hashCode(hasMapOfStringToListOfListOfStrings() ? mapOfStringToListOfListOfStrings() : null); return hashCode; } @@ -256,8 +257,11 @@ public final boolean equalsBySdkFields(Object obj) { return false; } NestedContainersResponse other = (NestedContainersResponse) obj; - return Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) + return hasListOfListOfStrings() == other.hasListOfListOfStrings() + && Objects.equals(listOfListOfStrings(), other.listOfListOfStrings()) + && hasListOfListOfListOfStrings() == other.hasListOfListOfListOfStrings() && Objects.equals(listOfListOfListOfStrings(), other.listOfListOfListOfStrings()) + && hasMapOfStringToListOfListOfStrings() == other.hasMapOfStringToListOfListOfStrings() && Objects.equals(mapOfStringToListOfListOfStrings(), other.mapOfStringToListOfListOfStrings()); } @@ -267,9 +271,12 @@ public final boolean equalsBySdkFields(Object obj) { */ @Override public final String toString() { - return ToString.builder("NestedContainersResponse").add("ListOfListOfStrings", listOfListOfStrings()) - .add("ListOfListOfListOfStrings", listOfListOfListOfStrings()) - .add("MapOfStringToListOfListOfStrings", mapOfStringToListOfListOfStrings()).build(); + return ToString + .builder("NestedContainersResponse") + .add("ListOfListOfStrings", hasListOfListOfStrings() ? listOfListOfStrings() : null) + .add("ListOfListOfListOfStrings", hasListOfListOfListOfStrings() ? listOfListOfListOfStrings() : null) + .add("MapOfStringToListOfListOfStrings", + hasMapOfStringToListOfListOfStrings() ? 
mapOfStringToListOfListOfStrings() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -365,6 +372,9 @@ private BuilderImpl(NestedContainersResponse model) { } public final Collection> getListOfListOfStrings() { + if (listOfListOfStrings instanceof SdkAutoConstructList) { + return null; + } return listOfListOfStrings; } @@ -386,6 +396,9 @@ public final void setListOfListOfStrings(Collection } public final Collection>> getListOfListOfListOfStrings() { + if (listOfListOfListOfStrings instanceof SdkAutoConstructList) { + return null; + } return listOfListOfListOfStrings; } @@ -409,6 +422,9 @@ public final void setListOfListOfListOfStrings( } public final Map>> getMapOfStringToListOfListOfStrings() { + if (mapOfStringToListOfListOfStrings instanceof SdkAutoConstructMap) { + return null; + } return mapOfStringToListOfListOfStrings; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/blobmaptypecopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/blobmaptypecopier.java deleted file mode 100644 index 19b9e1f761ee..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/blobmaptypecopier.java +++ /dev/null @@ -1,22 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.adapter.StandardMemberCopier; - -@Generated("software.amazon.awssdk:codegen") -final class BlobMapTypeCopier { - static Map copy(Map blobMapTypeParam) { - if (blobMapTypeParam == null) { - return null; - } - Map blobMapTypeParamCopy = blobMapTypeParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), StandardMemberCopier.copy(e.getValue())), HashMap::putAll); - return Collections.unmodifiableMap(blobMapTypeParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofblobstypecopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofblobstypecopier.java deleted file mode 100644 index 4ccf4939b5cb..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofblobstypecopier.java +++ /dev/null @@ -1,21 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.adapter.StandardMemberCopier; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfBlobsTypeCopier { - static List copy(Collection listOfBlobsTypeParam) { - if (listOfBlobsTypeParam == null) { - return null; - } - List listOfBlobsTypeParamCopy = listOfBlobsTypeParam.stream().map(StandardMemberCopier::copy).collect(toList()); - return Collections.unmodifiableList(listOfBlobsTypeParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofenumscopier.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofenumscopier.java deleted file mode 100644 index 6a5ca6a8c850..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofenumscopier.java +++ /dev/null @@ -1,37 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfEnumsCopier { - static List copy(Collection listOfEnumsParam) { - if (listOfEnumsParam == null) { - return null; - } - List listOfEnumsParamCopy = new ArrayList<>(listOfEnumsParam); - return Collections.unmodifiableList(listOfEnumsParamCopy); - } - - static List copyEnumToString(Collection listOfEnumsParam) { - if (listOfEnumsParam == null) { - return null; - } - List listOfEnumsParamCopy = listOfEnumsParam.stream().map(Object::toString).collect(toList()); - return Collections.unmodifiableList(listOfEnumsParamCopy); - } - - static List copyStringToEnum(Collection listOfEnumsParam) { - if (listOfEnumsParam == null) { - return null; - } - List listOfEnumsParamCopy = listOfEnumsParam.stream().map(EnumType::fromValue).collect(toList()); - return Collections.unmodifiableList(listOfEnumsParamCopy); - } -} - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofintegerscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofintegerscopier.java deleted file mode 100644 index 06a2c992b875..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofintegerscopier.java +++ /dev/null @@ -1,20 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfIntegersCopier { - static List copy(Collection listOfIntegersParam) { - if (listOfIntegersParam == null) { - return null; - } - List listOfIntegersParamCopy = new ArrayList<>(listOfIntegersParam); - return Collections.unmodifiableList(listOfIntegersParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listoflistoflistofstringscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listoflistoflistofstringscopier.java deleted file mode 100644 index 268277c2175a..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listoflistoflistofstringscopier.java +++ /dev/null @@ -1,21 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfListOfListOfStringsCopier { - static List>> copy( - Collection>> listOfListOfListOfStringsParam) { - if 
(listOfListOfListOfStringsParam == null) { - return null; - } - List>> listOfListOfListOfStringsParamCopy = listOfListOfListOfStringsParam.stream() - .map(ListOfListOfStringsCopier::copy).collect(toList()); - return Collections.unmodifiableList(listOfListOfListOfStringsParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listoflistofstringscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listoflistofstringscopier.java deleted file mode 100644 index 71b7e35950ae..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listoflistofstringscopier.java +++ /dev/null @@ -1,20 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfListOfStringsCopier { - static List> copy(Collection> listOfListOfStringsParam) { - if (listOfListOfStringsParam == null) { - return null; - } - List> listOfListOfStringsParamCopy = listOfListOfStringsParam.stream().map(ListOfStringsCopier::copy) - .collect(toList()); - return Collections.unmodifiableList(listOfListOfStringsParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofmapofenumtostringcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofmapofenumtostringcopier.java deleted file mode 100644 index 4f7c7788c248..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofmapofenumtostringcopier.java +++ /dev/null @@ -1,39 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfMapOfEnumToStringCopier { - static List> copy(Collection> listOfMapOfEnumToStringParam) { - if (listOfMapOfEnumToStringParam == null) { - return null; - } - List> listOfMapOfEnumToStringParamCopy = listOfMapOfEnumToStringParam.stream() - .map(MapOfEnumToStringCopier::copy).collect(toList()); - return Collections.unmodifiableList(listOfMapOfEnumToStringParamCopy); - } - - static List> copyEnumToString(Collection> listOfMapOfEnumToStringParam) { - if (listOfMapOfEnumToStringParam == null) { - return null; - } - List> listOfMapOfEnumToStringParamCopy = listOfMapOfEnumToStringParam.stream() - .map(MapOfEnumToStringCopier::copyEnumToString).collect(toList()); - return Collections.unmodifiableList(listOfMapOfEnumToStringParamCopy); - } - - static List> copyStringToEnum(Collection> listOfMapOfEnumToStringParam) { - if (listOfMapOfEnumToStringParam == null) { - return null; - } - List> listOfMapOfEnumToStringParamCopy = listOfMapOfEnumToStringParam.stream() - .map(MapOfEnumToStringCopier::copyStringToEnum).collect(toList()); - return Collections.unmodifiableList(listOfMapOfEnumToStringParamCopy); - } -} diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofmapstringtostringcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofmapstringtostringcopier.java deleted file mode 100644 index 4a90d8ebbe9b..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofmapstringtostringcopier.java +++ /dev/null @@ -1,21 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfMapStringToStringCopier { - static List> copy(Collection> listOfMapStringToStringParam) { - if (listOfMapStringToStringParam == null) { - return null; - } - List> listOfMapStringToStringParamCopy = listOfMapStringToStringParam.stream() - .map(MapOfStringToStringCopier::copy).collect(toList()); - return Collections.unmodifiableList(listOfMapStringToStringParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofsimplestructscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofsimplestructscopier.java deleted file mode 100644 index 240ff1412446..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofsimplestructscopier.java +++ /dev/null @@ -1,27 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfSimpleStructsCopier { - static List copy(Collection listOfSimpleStructsParam) { - if (listOfSimpleStructsParam == null) { - return null; - } - List listOfSimpleStructsParamCopy = new ArrayList<>(listOfSimpleStructsParam); - return Collections.unmodifiableList(listOfSimpleStructsParamCopy); - } - - static List copyFromBuilder(Collection listOfSimpleStructsParam) { - if (listOfSimpleStructsParam == null) { - return null; - } - return copy(listOfSimpleStructsParam.stream().map(SimpleStruct.Builder::build).collect(toList())); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofstringscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofstringscopier.java deleted file mode 100644 index ec92ae9d8f9c..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/listofstringscopier.java +++ /dev/null @@ -1,20 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class ListOfStringsCopier { - static List copy(Collection listOfStringsParam) { - if (listOfStringsParam == null) { - return null; - } - 
List listOfStringsParamCopy = new ArrayList<>(listOfStringsParam); - return Collections.unmodifiableList(listOfStringsParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtoenumcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtoenumcopier.java deleted file mode 100644 index 0595551ebd33..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtoenumcopier.java +++ /dev/null @@ -1,43 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfEnumToEnumCopier { - static Map copy(Map mapOfEnumToEnumParam) { - if (mapOfEnumToEnumParam == null) { - return null; - } - Map mapOfEnumToEnumParamCopy = mapOfEnumToEnumParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToEnumParamCopy); - } - - static Map copyEnumToString(Map mapOfEnumToEnumParam) { - if (mapOfEnumToEnumParam == null) { - return null; - } - Map mapOfEnumToEnumParamCopy = mapOfEnumToEnumParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey().toString(), e.getValue().toString()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToEnumParamCopy); - } - - static Map copyStringToEnum(Map mapOfEnumToEnumParam) { - if (mapOfEnumToEnumParam == null) { - return null; - } - Map mapOfEnumToEnumParamCopy = mapOfEnumToEnumParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> { - EnumType keyAsEnum = EnumType.fromValue(e.getKey()); - if (keyAsEnum != EnumType.UNKNOWN_TO_SDK_VERSION) { - m.put(keyAsEnum, EnumType.fromValue(e.getValue())); - } - }, HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToEnumParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtolistofenumscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtolistofenumscopier.java deleted file mode 100644 index fbf4e7ab92ed..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtolistofenumscopier.java +++ /dev/null @@ -1,48 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfEnumToListOfEnumsCopier { - static Map> copy(Map> mapOfEnumToListOfEnumsParam) { - if (mapOfEnumToListOfEnumsParam == null) { - return null; - } - Map> mapOfEnumToListOfEnumsParamCopy = mapOfEnumToListOfEnumsParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), ListOfEnumsCopier.copy(e.getValue())), HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToListOfEnumsParamCopy); - } - - static Map> copyEnumToString(Map> mapOfEnumToListOfEnumsParam) { - if (mapOfEnumToListOfEnumsParam == null) { - 
return null; - } - Map> mapOfEnumToListOfEnumsParamCopy = mapOfEnumToListOfEnumsParam - .entrySet() - .stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey().toString(), ListOfEnumsCopier.copyEnumToString(e.getValue())), - HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToListOfEnumsParamCopy); - } - - static Map> copyStringToEnum(Map> mapOfEnumToListOfEnumsParam) { - if (mapOfEnumToListOfEnumsParam == null) { - return null; - } - Map> mapOfEnumToListOfEnumsParamCopy = mapOfEnumToListOfEnumsParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> { - EnumType keyAsEnum = EnumType.fromValue(e.getKey()); - if (keyAsEnum != EnumType.UNKNOWN_TO_SDK_VERSION) { - m.put(keyAsEnum, ListOfEnumsCopier.copyStringToEnum(e.getValue())); - } - }, HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToListOfEnumsParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtomapofstringtoenumcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtomapofstringtoenumcopier.java deleted file mode 100644 index 758a7d09642f..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtomapofstringtoenumcopier.java +++ /dev/null @@ -1,50 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfEnumToMapOfStringToEnumCopier { - static Map> copy(Map> mapOfEnumToMapOfStringToEnumParam) { - if (mapOfEnumToMapOfStringToEnumParam == null) { - return null; - } - Map> mapOfEnumToMapOfStringToEnumParamCopy = mapOfEnumToMapOfStringToEnumParam.entrySet() - .stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), MapOfStringToEnumCopier.copy(e.getValue())), HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToMapOfStringToEnumParamCopy); - } - - static Map> copyEnumToString( - Map> mapOfEnumToMapOfStringToEnumParam) { - if (mapOfEnumToMapOfStringToEnumParam == null) { - return null; - } - Map> mapOfEnumToMapOfStringToEnumParamCopy = mapOfEnumToMapOfStringToEnumParam - .entrySet() - .stream() - .collect(HashMap::new, - (m, e) -> m.put(e.getKey().toString(), MapOfStringToEnumCopier.copyEnumToString(e.getValue())), - HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToMapOfStringToEnumParamCopy); - } - - static Map> copyStringToEnum( - Map> mapOfEnumToMapOfStringToEnumParam) { - if (mapOfEnumToMapOfStringToEnumParam == null) { - return null; - } - Map> mapOfEnumToMapOfStringToEnumParamCopy = mapOfEnumToMapOfStringToEnumParam.entrySet() - .stream().collect(HashMap::new, (m, e) -> { - EnumType keyAsEnum = EnumType.fromValue(e.getKey()); - if (keyAsEnum != EnumType.UNKNOWN_TO_SDK_VERSION) { - m.put(keyAsEnum, MapOfStringToEnumCopier.copyStringToEnum(e.getValue())); - } - }, HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToMapOfStringToEnumParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtosimplestructcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtosimplestructcopier.java deleted file mode 100644 index 
48adc125e350..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtosimplestructcopier.java +++ /dev/null @@ -1,50 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfEnumToSimpleStructCopier { - static Map copy(Map mapOfEnumToSimpleStructParam) { - if (mapOfEnumToSimpleStructParam == null) { - return null; - } - Map mapOfEnumToSimpleStructParamCopy = mapOfEnumToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToSimpleStructParamCopy); - } - - static Map copyFromBuilder(Map mapOfEnumToSimpleStructParam) { - if (mapOfEnumToSimpleStructParam == null) { - return null; - } - return copy(mapOfEnumToSimpleStructParam.entrySet().stream().collect(toMap(Map.Entry::getKey, e -> e.getValue().build()))); - } - - static Map copyEnumToString(Map mapOfEnumToSimpleStructParam) { - if (mapOfEnumToSimpleStructParam == null) { - return null; - } - Map mapOfEnumToSimpleStructParamCopy = mapOfEnumToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey().toString(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToSimpleStructParamCopy); - } - - static Map copyStringToEnum(Map mapOfEnumToSimpleStructParam) { - if (mapOfEnumToSimpleStructParam == null) { - return null; - } - Map mapOfEnumToSimpleStructParamCopy = mapOfEnumToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> { - EnumType keyAsEnum = EnumType.fromValue(e.getKey()); - if (keyAsEnum != EnumType.UNKNOWN_TO_SDK_VERSION) { - m.put(keyAsEnum, e.getValue()); - } - }, HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToSimpleStructParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtostringcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtostringcopier.java deleted file mode 100644 index bac86cc0f106..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofenumtostringcopier.java +++ /dev/null @@ -1,43 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfEnumToStringCopier { - static Map copy(Map mapOfEnumToStringParam) { - if (mapOfEnumToStringParam == null) { - return null; - } - Map mapOfEnumToStringParamCopy = mapOfEnumToStringParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToStringParamCopy); - } - - static Map copyEnumToString(Map mapOfEnumToStringParam) { - if (mapOfEnumToStringParam == null) { - return null; - } - Map mapOfEnumToStringParamCopy = mapOfEnumToStringParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey().toString(), e.getValue()), HashMap::putAll); - return 
Collections.unmodifiableMap(mapOfEnumToStringParamCopy); - } - - static Map copyStringToEnum(Map mapOfEnumToStringParam) { - if (mapOfEnumToStringParam == null) { - return null; - } - Map mapOfEnumToStringParamCopy = mapOfEnumToStringParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> { - EnumType keyAsEnum = EnumType.fromValue(e.getKey()); - if (keyAsEnum != EnumType.UNKNOWN_TO_SDK_VERSION) { - m.put(keyAsEnum, e.getValue()); - } - }, HashMap::putAll); - return Collections.unmodifiableMap(mapOfEnumToStringParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtoenumcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtoenumcopier.java deleted file mode 100644 index 47931c3d813a..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtoenumcopier.java +++ /dev/null @@ -1,39 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfStringToEnumCopier { - static Map copy(Map mapOfStringToEnumParam) { - if (mapOfStringToEnumParam == null) { - return null; - } - Map mapOfStringToEnumParamCopy = mapOfStringToEnumParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfStringToEnumParamCopy); - } - - static Map copyEnumToString(Map mapOfStringToEnumParam) { - if (mapOfStringToEnumParam == null) { - return null; - } - Map mapOfStringToEnumParamCopy = mapOfStringToEnumParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue().toString()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfStringToEnumParamCopy); - } - - static Map copyStringToEnum(Map mapOfStringToEnumParam) { - if (mapOfStringToEnumParam == null) { - return null; - } - Map mapOfStringToEnumParamCopy = mapOfStringToEnumParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), EnumType.fromValue(e.getValue())), HashMap::putAll); - return Collections.unmodifiableMap(mapOfStringToEnumParamCopy); - } -} - diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtointegerlistcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtointegerlistcopier.java deleted file mode 100644 index da406a066cc5..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtointegerlistcopier.java +++ /dev/null @@ -1,22 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfStringToIntegerListCopier { - static Map> copy(Map> mapOfStringToIntegerListParam) { - if (mapOfStringToIntegerListParam == null) { - return null; - } - Map> mapOfStringToIntegerListParamCopy = 
mapOfStringToIntegerListParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), ListOfIntegersCopier.copy(e.getValue())), HashMap::putAll); - return Collections.unmodifiableMap(mapOfStringToIntegerListParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtolistoflistofstringscopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtolistoflistofstringscopier.java deleted file mode 100644 index 4ec13c103357..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtolistoflistofstringscopier.java +++ /dev/null @@ -1,25 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfStringToListOfListOfStringsCopier { - static Map>> copy( - Map>> mapOfStringToListOfListOfStringsParam) { - if (mapOfStringToListOfListOfStringsParam == null) { - return null; - } - Map>> mapOfStringToListOfListOfStringsParamCopy = mapOfStringToListOfListOfStringsParam - .entrySet() - .stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), ListOfListOfStringsCopier.copy(e.getValue())), HashMap::putAll); - return Collections.unmodifiableMap(mapOfStringToListOfListOfStringsParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtosimplestructcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtosimplestructcopier.java deleted file mode 100644 index e476d5169819..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtosimplestructcopier.java +++ /dev/null @@ -1,28 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfStringToSimpleStructCopier { - static Map copy(Map mapOfStringToSimpleStructParam) { - if (mapOfStringToSimpleStructParam == null) { - return null; - } - Map mapOfStringToSimpleStructParamCopy = mapOfStringToSimpleStructParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfStringToSimpleStructParamCopy); - } - - static Map copyFromBuilder(Map mapOfStringToSimpleStructParam) { - if (mapOfStringToSimpleStructParam == null) { - return null; - } - return copy(mapOfStringToSimpleStructParam.entrySet().stream() - .collect(toMap(Map.Entry::getKey, e -> e.getValue().build()))); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtostringcopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtostringcopier.java deleted file mode 100644 index a0421da95fc8..000000000000 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/mapofstringtostringcopier.java +++ /dev/null @@ -1,20 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class MapOfStringToStringCopier { - static Map copy(Map mapOfStringToStringParam) { - if (mapOfStringToStringParam == null) { - return null; - } - Map mapOfStringToStringParamCopy = mapOfStringToStringParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(mapOfStringToStringParamCopy); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/recursivelisttypecopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/recursivelisttypecopier.java deleted file mode 100644 index ad2eaaadc9a1..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/recursivelisttypecopier.java +++ /dev/null @@ -1,27 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toList; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class RecursiveListTypeCopier { - static List copy(Collection recursiveListTypeParam) { - if (recursiveListTypeParam == null) { - return null; - } - List recursiveListTypeParamCopy = new ArrayList<>(recursiveListTypeParam); - return Collections.unmodifiableList(recursiveListTypeParamCopy); - } - - static List copyFromBuilder(Collection recursiveListTypeParam) { - if (recursiveListTypeParam == null) { - return null; - } - return copy(recursiveListTypeParam.stream().map(RecursiveStructType.Builder::build).collect(toList())); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/recursivemaptypecopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/recursivemaptypecopier.java deleted file mode 100644 index 8e4790ede443..000000000000 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/recursivemaptypecopier.java +++ /dev/null @@ -1,28 +0,0 @@ -package software.amazon.awssdk.services.jsonprotocoltests.model; - -import static java.util.stream.Collectors.toMap; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import software.amazon.awssdk.annotations.Generated; - -@Generated("software.amazon.awssdk:codegen") -final class RecursiveMapTypeCopier { - static Map copy(Map recursiveMapTypeParam) { - if (recursiveMapTypeParam == null) { - return null; - } - Map recursiveMapTypeParamCopy = recursiveMapTypeParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); - return Collections.unmodifiableMap(recursiveMapTypeParamCopy); - } - - static Map copyFromBuilder( - Map recursiveMapTypeParam) { - if (recursiveMapTypeParam == null) { - return null; - } - return 
copy(recursiveMapTypeParam.entrySet().stream().collect(toMap(Map.Entry::getKey, e -> e.getValue().build()))); - } -} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivelisttypecopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivelisttypecopier.java index 64fb00074196..3495ef987ec3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivelisttypecopier.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivelisttypecopier.java @@ -21,8 +21,8 @@ static List copy(Collection recursiveL } static List copyFromBuilder(Collection recursiveListTypeParam) { - if (recursiveListTypeParam == null) { - return null; + if (recursiveListTypeParam == null || recursiveListTypeParam instanceof DefaultSdkAutoConstructList) { + return DefaultSdkAutoConstructList.getInstance(); } return copy(recursiveListTypeParam.stream().map(RecursiveStructType.Builder::build).collect(toList())); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivemaptypecopier.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivemaptypecopier.java index cb449e16eb90..9e3a968aee42 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivemaptypecopier.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivemaptypecopier.java @@ -16,14 +16,14 @@ static Map copy(Map re return DefaultSdkAutoConstructMap.getInstance(); } Map recursiveMapTypeParamCopy = recursiveMapTypeParam.entrySet().stream() - .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); + .collect(HashMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), HashMap::putAll); return Collections.unmodifiableMap(recursiveMapTypeParamCopy); } static Map copyFromBuilder( Map recursiveMapTypeParam) { - if (recursiveMapTypeParam == null) { - return null; + if (recursiveMapTypeParam == null || recursiveMapTypeParam instanceof DefaultSdkAutoConstructMap) { + return DefaultSdkAutoConstructMap.getInstance(); } return copy(recursiveMapTypeParam.entrySet().stream().collect(toMap(Map.Entry::getKey, e -> e.getValue().build()))); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java index 25d70227eb9c..6ec4f64713a4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java @@ -177,8 +177,8 @@ public final int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + Objects.hashCode(noRecurse()); hashCode = 31 * hashCode + Objects.hashCode(recursiveStruct()); - hashCode = 31 * hashCode + Objects.hashCode(recursiveList()); - hashCode = 31 * hashCode + Objects.hashCode(recursiveMap()); + hashCode = 31 * hashCode + Objects.hashCode(hasRecursiveList() ? recursiveList() : null); + hashCode = 31 * hashCode + Objects.hashCode(hasRecursiveMap() ? 
recursiveMap() : null); return hashCode; } @@ -200,7 +200,8 @@ public final boolean equalsBySdkFields(Object obj) { } RecursiveStructType other = (RecursiveStructType) obj; return Objects.equals(noRecurse(), other.noRecurse()) && Objects.equals(recursiveStruct(), other.recursiveStruct()) - && Objects.equals(recursiveList(), other.recursiveList()) && Objects.equals(recursiveMap(), other.recursiveMap()); + && hasRecursiveList() == other.hasRecursiveList() && Objects.equals(recursiveList(), other.recursiveList()) + && hasRecursiveMap() == other.hasRecursiveMap() && Objects.equals(recursiveMap(), other.recursiveMap()); } /** @@ -210,7 +211,8 @@ public final boolean equalsBySdkFields(Object obj) { @Override public final String toString() { return ToString.builder("RecursiveStructType").add("NoRecurse", noRecurse()).add("RecursiveStruct", recursiveStruct()) - .add("RecursiveList", recursiveList()).add("RecursiveMap", recursiveMap()).build(); + .add("RecursiveList", hasRecursiveList() ? recursiveList() : null) + .add("RecursiveMap", hasRecursiveMap() ? recursiveMap() : null).build(); } public final Optional getValueForField(String fieldName, Class clazz) { @@ -370,6 +372,9 @@ public final void setRecursiveStruct(BuilderImpl recursiveStruct) { } public final Collection getRecursiveList() { + if (recursiveList instanceof SdkAutoConstructList) { + return null; + } return recursiveList != null ? recursiveList.stream().map(RecursiveStructType::toBuilder) .collect(Collectors.toList()) : null; } @@ -400,6 +405,9 @@ public final void setRecursiveList(Collection recursiveList) { } public final Map getRecursiveMap() { + if (recursiveMap instanceof SdkAutoConstructMap) { + return null; + } return recursiveMap != null ? CollectionUtils.mapValues(recursiveMap, RecursiveStructType::toBuilder) : null; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json index c14e193475bd..3e31e3d4ead7 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/service-2.json @@ -137,7 +137,8 @@ "RecursiveStruct":{"shape":"RecursiveStructType"}, "PolymorphicTypeWithSubTypes":{"shape":"BaseType"}, "PolymorphicTypeWithoutSubTypes":{"shape":"SubTypeOne"}, - "EnumType":{"shape":"EnumType"} + "EnumType":{"shape":"EnumType"}, + "Underscore_Name_Type":{"shape":"Underscore_Name_Type"} } }, "BaseType":{ @@ -474,6 +475,9 @@ }, "event": true }, - "ImplicitPayloadMemberOne":{"type":"blob"} + "ImplicitPayloadMemberOne":{"type":"blob"}, + "Underscore_Name_Type": { + "type": "structure" + } } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/underscore_name_type.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/underscore_name_type.java new file mode 100644 index 000000000000..91328161e3ec --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/underscore_name_type.java @@ -0,0 +1,104 @@ +package software.amazon.awssdk.services.jsonprotocoltests.model; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.ToString; +import 
software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + */ +@Generated("software.amazon.awssdk:codegen") +public final class Underscore_Name_Type implements SdkPojo, Serializable, + ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private static final long serialVersionUID = 1L; + + private Underscore_Name_Type(BuilderImpl builder) { + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public final int hashCode() { + int hashCode = 1; + return hashCode; + } + + @Override + public final boolean equals(Object obj) { + return equalsBySdkFields(obj); + } + + @Override + public final boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof Underscore_Name_Type)) { + return false; + } + return true; + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be + * redacted from this string using a placeholder value. + */ + @Override + public final String toString() { + return ToString.builder("Underscore_Name_Type").build(); + } + + public final Optional getValueForField(String fieldName, Class clazz) { + return Optional.empty(); + } + + @Override + public final List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SdkPojo, CopyableBuilder { + } + + static final class BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(Underscore_Name_Type model) { + } + + @Override + public Underscore_Name_Type build() { + return new Underscore_Name_Type(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} + diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index d21975cbe0e6..dfcd479134d8 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java index 16117b3bbd1b..fe14c2146895 100644 --- a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkInternalApi.java @@ -19,7 +19,7 @@ import java.lang.annotation.Target; /** - * Marker interface for 'internal' APIs that should not be used outside the core module. Breaking + * Marker interface for 'internal' APIs that should not be used outside the same module. Breaking * changes can and will be introduced to elements marked as {@link SdkInternalApi}. Users of the SDK * and the generated clients themselves should not depend on any packages, types, fields, * constructors, or methods with this annotation. 
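The RecursiveStructType and copier changes earlier in this diff stop returning null for unset collections and instead use the SDK's auto-construct list/map sentinels, with hasRecursiveList()/hasRecursiveMap() consulted in equals, hashCode and toString so that "never set" and "explicitly empty" no longer compare as equal. A minimal, self-contained sketch of that sentinel idea in plain Java (ResourceModel and its tags member are illustrative names, not SDK types):

    import java.util.Collections;
    import java.util.List;
    import java.util.Objects;

    final class ResourceModel {
        // Shared sentinel meaning "the caller never set this member"
        // (stands in for the SDK's DefaultSdkAutoConstructList).
        private static final List<String> AUTO_CONSTRUCT = Collections.emptyList();

        private final List<String> tags;

        ResourceModel(List<String> tags) {
            // null means "unset" and becomes the sentinel; any real list,
            // even an empty one, is copied and treated as explicitly set.
            this.tags = tags == null ? AUTO_CONSTRUCT : List.copyOf(tags);
        }

        boolean hasTags() {
            return tags != AUTO_CONSTRUCT;
        }

        List<String> tags() {
            return tags;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof ResourceModel)) {
                return false;
            }
            ResourceModel other = (ResourceModel) o;
            // Unset vs. explicitly empty compare as different, mirroring the hasXxx() checks in the diff.
            return hasTags() == other.hasTags() && Objects.equals(tags, other.tags);
        }

        @Override
        public int hashCode() {
            return 31 + Objects.hashCode(hasTags() ? tags : null);
        }
    }
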
diff --git a/core/arns/pom.xml b/core/arns/pom.xml index be2d10685924..3d349cda91ab 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 226f92c1e74a..3983250b61c2 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT auth diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderFactory.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderFactory.java new file mode 100644 index 000000000000..8d146280f106 --- /dev/null +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/ProfileCredentialsProviderFactory.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.auth.credentials; + +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.profiles.Profile; + +/** + * A factory for {@link AwsCredentialsProvider}s, which can be used to create different credentials providers with + * different Profile properties. + */ +@FunctionalInterface +@SdkProtectedApi +public interface ProfileCredentialsProviderFactory { + AwsCredentialsProvider create(Profile profile); +} diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java index c57cfe17b6c9..202b0fbad000 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/ProfileCredentialsUtils.java @@ -35,8 +35,10 @@ import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider; import software.amazon.awssdk.auth.credentials.ProcessCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProviderFactory; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider; +import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; import software.amazon.awssdk.profiles.Profile; import software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.utils.SdkAutoCloseable; @@ -49,6 +51,8 @@ public final class ProfileCredentialsUtils { private static final String STS_PROFILE_CREDENTIALS_PROVIDER_FACTORY = "software.amazon.awssdk.services.sts.internal.StsProfileCredentialsProviderFactory"; + private static final String SSO_PROFILE_CREDENTIALS_PROVIDER_FACTORY = + "software.amazon.awssdk.services.sso.auth.SsoProfileCredentialsProviderFactory"; private final Profile profile; @@ -94,19 
+98,22 @@ public Optional credentialsProvider() { * @param children The child profiles that source credentials from this profile. */ private Optional credentialsProvider(Set children) { + if (properties.containsKey(ProfileProperty.ROLE_ARN) && properties.containsKey(ProfileProperty.WEB_IDENTITY_TOKEN_FILE)) { + return Optional.ofNullable(roleAndWebIdentityTokenProfileCredentialsProvider()); + } + + if (properties.containsKey(ProfileProperty.SSO_ROLE_NAME) || properties.containsKey(ProfileProperty.SSO_ACCOUNT_ID) + || properties.containsKey(ProfileProperty.SSO_REGION) || properties.containsKey(ProfileProperty.SSO_START_URL)) { + return Optional.ofNullable(ssoProfileCredentialsProvider()); + } + if (properties.containsKey(ProfileProperty.ROLE_ARN)) { boolean hasSourceProfile = properties.containsKey(ProfileProperty.SOURCE_PROFILE); boolean hasCredentialSource = properties.containsKey(ProfileProperty.CREDENTIAL_SOURCE); - boolean hasWebIdentityTokenFile = properties.containsKey(ProfileProperty.WEB_IDENTITY_TOKEN_FILE); - boolean hasRoleArn = properties.containsKey(ProfileProperty.ROLE_ARN); Validate.validState(!(hasSourceProfile && hasCredentialSource), "Invalid profile file: profile has both %s and %s.", ProfileProperty.SOURCE_PROFILE, ProfileProperty.CREDENTIAL_SOURCE); - if (hasWebIdentityTokenFile && hasRoleArn) { - return Optional.ofNullable(roleAndWebIdentityTokenProfileCredentialsProvider()); - } - if (hasSourceProfile) { return Optional.ofNullable(roleAndSourceProfileBasedProfileCredentialsProvider(children)); } @@ -163,6 +170,17 @@ private AwsCredentialsProvider credentialProcessCredentialsProvider() { .build(); } + /** + * Create the SSO credentials provider based on the related profile properties. + */ + private AwsCredentialsProvider ssoProfileCredentialsProvider() { + requireProperties(ProfileProperty.SSO_ACCOUNT_ID, + ProfileProperty.SSO_REGION, + ProfileProperty.SSO_ROLE_NAME, + ProfileProperty.SSO_START_URL); + return ssoCredentialsProviderFactory().create(profile); + } + private AwsCredentialsProvider roleAndWebIdentityTokenProfileCredentialsProvider() { requireProperties(ProfileProperty.ROLE_ARN, ProfileProperty.WEB_IDENTITY_TOKEN_FILE); @@ -252,8 +270,8 @@ private IllegalStateException noSourceCredentialsException() { */ private ChildProfileCredentialsProviderFactory stsCredentialsProviderFactory() { try { - Class stsCredentialsProviderFactory = Class.forName(STS_PROFILE_CREDENTIALS_PROVIDER_FACTORY, true, - Thread.currentThread().getContextClassLoader()); + Class stsCredentialsProviderFactory = ClassLoaderHelper.loadClass(STS_PROFILE_CREDENTIALS_PROVIDER_FACTORY, + getClass()); return (ChildProfileCredentialsProviderFactory) stsCredentialsProviderFactory.getConstructor().newInstance(); } catch (ClassNotFoundException e) { throw new IllegalStateException("To use assumed roles in the '" + name + "' profile, the 'sts' service module must " @@ -262,4 +280,20 @@ private ChildProfileCredentialsProviderFactory stsCredentialsProviderFactory() { throw new IllegalStateException("Failed to create the '" + name + "' profile credentials provider.", e); } } + + /** + * Load the factory that can be used to create the SSO credentials provider, assuming it is on the classpath. 
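The ProfileCredentialsUtils change above only reaches for the SSO factory when a profile actually carries sso_* properties, and it loads software.amazon.awssdk.services.sso.auth.SsoProfileCredentialsProviderFactory by name so the optional 'sso' module is a runtime rather than compile-time dependency. A rough sketch of that optional-module pattern using plain JDK reflection (OptionalFactoryLoader and CredentialsFactory are placeholder names; the SDK itself goes through its internal ClassLoaderHelper rather than Class.forName as shown here):

    import java.lang.reflect.InvocationTargetException;

    final class OptionalFactoryLoader {

        // Placeholder for the factory interface the optional module is expected to implement.
        interface CredentialsFactory {
            Object create(Object profile);
        }

        static CredentialsFactory load(String factoryClassName) {
            try {
                Class<?> factoryClass = Class.forName(factoryClassName, true,
                                                      Thread.currentThread().getContextClassLoader());
                // Assume the optional module ships a public no-arg constructor; instantiate and cast to the SPI.
                return (CredentialsFactory) factoryClass.getConstructor().newInstance();
            } catch (ClassNotFoundException e) {
                // Same failure mode as the diff: tell the user which optional module is missing.
                throw new IllegalStateException("To use this feature, the module providing "
                                                + factoryClassName + " must be on the class path.", e);
            } catch (NoSuchMethodException | InvocationTargetException
                     | InstantiationException | IllegalAccessException e) {
                throw new IllegalStateException("Failed to create the credentials provider factory.", e);
            }
        }
    }
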
+ */ + private ProfileCredentialsProviderFactory ssoCredentialsProviderFactory() { + try { + Class ssoProfileCredentialsProviderFactory = ClassLoaderHelper.loadClass(SSO_PROFILE_CREDENTIALS_PROVIDER_FACTORY, + getClass()); + return (ProfileCredentialsProviderFactory) ssoProfileCredentialsProviderFactory.getConstructor().newInstance(); + } catch (ClassNotFoundException e) { + throw new IllegalStateException("To use Sso related properties in the '" + name + "' profile, the 'sso' service " + + "module must be on the class path.", e); + } catch (NoSuchMethodException | InvocationTargetException | InstantiationException | IllegalAccessException e) { + throw new IllegalStateException("Failed to create the '" + name + "' profile credentials provider.", e); + } + } } diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java index b73f7ec1dfce..44fe98d4bd20 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/internal/WebIdentityCredentialsUtils.java @@ -18,12 +18,15 @@ import java.lang.reflect.InvocationTargetException; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.WebIdentityTokenCredentialsProviderFactory; +import software.amazon.awssdk.core.internal.util.ClassLoaderHelper; +import software.amazon.awssdk.utils.Logger; /** * Utility class used to configure credential providers based on JWT web identity tokens. */ @SdkInternalApi public final class WebIdentityCredentialsUtils { + private static final Logger log = Logger.loggerFor(WebIdentityCredentialsUtils.class); private static final String STS_WEB_IDENTITY_CREDENTIALS_PROVIDER_FACTORY = "software.amazon.awssdk.services.sts.internal.StsWebIdentityCredentialsProviderFactory"; @@ -39,11 +42,13 @@ private WebIdentityCredentialsUtils() { */ public static WebIdentityTokenCredentialsProviderFactory factory() { try { - Class stsCredentialsProviderFactory = Class.forName(STS_WEB_IDENTITY_CREDENTIALS_PROVIDER_FACTORY, true, - Thread.currentThread().getContextClassLoader()); + Class stsCredentialsProviderFactory = ClassLoaderHelper.loadClass(STS_WEB_IDENTITY_CREDENTIALS_PROVIDER_FACTORY, + WebIdentityCredentialsUtils.class); return (WebIdentityTokenCredentialsProviderFactory) stsCredentialsProviderFactory.getConstructor().newInstance(); } catch (ClassNotFoundException e) { - throw new IllegalStateException("To use web identity tokens, the 'sts' service module must be on the class path.", e); + String message = "To use web identity tokens, the 'sts' service module must be on the class path."; + log.warn(() -> message); + throw new IllegalStateException(message, e); } catch (NoSuchMethodException | InvocationTargetException | InstantiationException | IllegalAccessException e) { throw new IllegalStateException("Failed to create a web identity token credentials provider.", e); } diff --git a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java index 25fcbd0f1f2a..e712ea6257ce 100644 --- a/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java +++ 
b/core/auth/src/test/java/software/amazon/awssdk/auth/credentials/internal/LazyAwsCredentialsProviderTest.java @@ -20,6 +20,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.utils.SdkAutoCloseable; public class LazyAwsCredentialsProviderTest { @SuppressWarnings("unchecked") @@ -48,4 +49,30 @@ public void resolveCredentialsInvokesSupplierExactlyOnce() { Mockito.verify(credentialsConstructor, Mockito.times(1)).get(); Mockito.verify(credentials, Mockito.times(2)).resolveCredentials(); } + + @Test + public void delegatesClosesInitializerAndValue() { + CloseableSupplier initializer = Mockito.mock(CloseableSupplier.class); + CloseableCredentialsProvider value = Mockito.mock(CloseableCredentialsProvider.class); + + Mockito.when(initializer.get()).thenReturn(value); + + LazyAwsCredentialsProvider.create(initializer).close(); + + Mockito.verify(initializer).close(); + Mockito.verify(value).close(); + } + + @Test + public void delegatesClosesInitializerEvenIfGetFails() { + CloseableSupplier initializer = Mockito.mock(CloseableSupplier.class); + Mockito.when(initializer.get()).thenThrow(new RuntimeException()); + + LazyAwsCredentialsProvider.create(initializer).close(); + + Mockito.verify(initializer).close(); + } + + private interface CloseableSupplier extends Supplier, SdkAutoCloseable {} + private interface CloseableCredentialsProvider extends SdkAutoCloseable, AwsCredentialsProvider {} } diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index b6e26e17fc1e..0d2e874074c7 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT aws-core diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java index d8437707427e..4f4ec847504b 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java @@ -394,7 +394,9 @@ public void onError(Throwable throwable) { @Override public void onComplete() { // Add the special on complete event to signal drainEvents to complete the subscriber - eventsToDeliver.add(ON_COMPLETE_EVENT); + synchronized (eventsToDeliver) { + eventsToDeliver.add(ON_COMPLETE_EVENT); + } drainEventsIfNotAlready(); transformFuture.complete(null); } diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 0806742e27f3..d90aea8cfbf3 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index 393ae2d03ed6..82acaafa6716 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 960072631958..10feec04f8fa 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT profiles diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileLocation.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileLocation.java index 
a3f5dc63db15..bbb5fea32535 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileLocation.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileLocation.java @@ -15,16 +15,15 @@ package software.amazon.awssdk.profiles; +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; + import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.Optional; import java.util.regex.Pattern; -import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.utils.JavaSystemSetting; -import software.amazon.awssdk.utils.StringUtils; /** * A collection of static methods for loading the location for configuration and credentials files. @@ -46,7 +45,7 @@ private ProfileFileLocation() { public static Path configurationFilePath() { return resolveProfileFilePath( ProfileFileSystemSetting.AWS_CONFIG_FILE.getStringValue() - .orElse(Paths.get(ProfileFileLocation.userHomeDirectory(), + .orElse(Paths.get(userHomeDirectory(), ".aws", "config").toString())); } @@ -78,43 +77,6 @@ public static Optional credentialsFileLocation() { return resolveIfExists(credentialsFilePath()); } - /** - * Load the home directory that should be used for the profile file. This will check the same environment variables as the CLI - * to identify the location of home, before falling back to java-specific resolution. - */ - @SdkInternalApi - static String userHomeDirectory() { - boolean isWindows = JavaSystemSetting.OS_NAME.getStringValue() - .map(s -> StringUtils.lowerCase(s).startsWith("windows")) - .orElse(false); - - // To match the logic of the CLI we have to consult environment variables directly. - // CHECKSTYLE:OFF - String home = System.getenv("HOME"); - - if (home != null) { - return home; - } - - if (isWindows) { - String userProfile = System.getenv("USERPROFILE"); - - if (userProfile != null) { - return userProfile; - } - - String homeDrive = System.getenv("HOMEDRIVE"); - String homePath = System.getenv("HOMEPATH"); - - if (homeDrive != null && homePath != null) { - return homeDrive + homePath; - } - } - - return JavaSystemSetting.USER_HOME.getStringValueOrThrow(); - // CHECKSTYLE:ON - } - private static Path resolveProfileFilePath(String path) { // Resolve ~ using the CLI's logic, not whatever Java decides to do with it. if (HOME_DIRECTORY_PATTERN.matcher(path).matches()) { diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java index 6f1070ea8a2f..cc7b886495f8 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java @@ -99,6 +99,28 @@ public final class ProfileProperty { */ public static final String RETRY_MODE = "retry_mode"; + /** + * Aws region where the SSO directory for the given 'sso_start_url' is hosted. This is independent of the general 'region'. + */ + public static final String SSO_REGION = "sso_region"; + + /** + * The corresponding IAM role in the AWS account that temporary AWS credentials will be resolved for. + */ + public static final String SSO_ROLE_NAME = "sso_role_name"; + + /** + * AWS account ID that temporary AWS credentials will be resolved for. 
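The sso_* properties added in this hunk (sso_region, sso_role_name, sso_account_id, sso_start_url) are exactly what the reflectively loaded SSO factory requires from the selected profile. A hypothetical ~/.aws/config entry using them could look like the following; every value shown is a made-up placeholder:

    [profile my-sso-profile]
    sso_start_url = https://example.awsapps.com/start
    sso_region = us-east-1
    sso_account_id = 111122223333
    sso_role_name = ExampleReadOnlyRole
    region = us-west-2
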
+ */ + public static final String SSO_ACCOUNT_ID = "sso_account_id"; + + /** + * Start url provided by the SSO service via the console. It's the main URL used for login to the SSO directory. + * This is also referred to as the "User Portal URL" and can also be used to login to the SSO web interface for AWS + * console access. + */ + public static final String SSO_START_URL = "sso_start_url"; + private ProfileProperty() { } } diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java index c62ca7a15ce7..fd8970f4ab49 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java @@ -42,7 +42,7 @@ public final class ProfileFileReader { private static final Pattern EMPTY_LINE = Pattern.compile("^[\t ]*$"); - private static final Pattern VALID_IDENTIFIER = Pattern.compile("^[A-Za-z0-9_\\-/.%@:]*$"); + private static final Pattern VALID_IDENTIFIER = Pattern.compile("^[A-Za-z0-9_\\-/.%@:\\+]*$"); private ProfileFileReader() { } @@ -214,7 +214,7 @@ private static Optional parseProfileDefinition(ParserState state, String // If the profile name includes invalid characters, it should be ignored. if (!isValidIdentifier(profileName)) { log.warn(() -> "Ignoring profile '" + standardizedProfileName + "' on line " + state.currentLineNumber + " because " + - "it was not alphanumeric with only these special characters: - / . % @ _ :"); + "it was not alphanumeric with only these special characters: - / . % @ _ : +"); return Optional.empty(); } @@ -257,7 +257,7 @@ private static Optional> parsePropertyDefinition(ParserStat // If the profile name includes invalid characters, it should be ignored. if (!isValidIdentifier(propertyKey)) { log.warn(() -> "Ignoring property '" + propertyKey + "' on line " + state.currentLineNumber + " because " + - "its name was not alphanumeric with only these special characters: - / . % @ _ :"); + "its name was not alphanumeric with only these special characters: - / . 
% @ _ : +"); return Optional.empty(); } diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java index 82377ae6c6aa..116bb48df3e1 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java @@ -326,14 +326,14 @@ public void invalidPropertyNamesAreIgnored() { @Test public void allValidProfileNameCharactersAreSupported() { - assertThat(configFileProfiles("[profile ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:]")) - .isEqualTo(profiles(profile("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:"))); + assertThat(configFileProfiles("[profile ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:+]")) + .isEqualTo(profiles(profile("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:+"))); } @Test public void allValidPropertyNameCharactersAreSupported() { - assertThat(configFileProfiles("[profile foo]\nABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@: = value")) - .isEqualTo(profiles(profile("foo", property("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:", + assertThat(configFileProfiles("[profile foo]\nABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:+ = value")) + .isEqualTo(profiles(profile("foo", property("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:+", "value")))); } diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 0187b7d0d6dd..b2ea7756b013 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-ion-protocol/pom.xml b/core/protocols/aws-ion-protocol/pom.xml index 764220170ad6..d4255b0a0120 100644 --- a/core/protocols/aws-ion-protocol/pom.xml +++ b/core/protocols/aws-ion-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index 1aa0196b5af0..f7f2bb1af105 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index e6622d945459..2c998138e1ca 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index da83e1fe423a..393b1feb0335 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlResponseParserUtils.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlResponseParserUtils.java index 78967fdea9a2..97023b37709b 100644 --- 
a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlResponseParserUtils.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/XmlResponseParserUtils.java @@ -69,6 +69,6 @@ private static boolean hasPayloadMembers(SdkPojo sdkPojo) { } private static boolean contentLengthZero(SdkHttpFullResponse response) { - return response.firstMatchingHeader(CONTENT_LENGTH).filter(h -> Long.parseLong(h) == 0).isPresent(); + return response.firstMatchingHeader(CONTENT_LENGTH).map(l -> Long.parseLong(l) == 0).orElse(false); } } diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 9c460ebe47f9..b97549e8822d 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index d150071a6f1a..0b03a96dc651 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index 4041def88da3..f9a57f55b691 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 490046774f6b..eb9deac60b4c 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -242,6 +242,19 @@ "us-west-2" : { } } }, + "airflow" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "api.detective" : { "defaults" : { "protocols" : [ "https" ] @@ -557,6 +570,17 @@ "us-west-2" : { } } }, + "app-integrations" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "appflow" : { "endpoints" : { "ap-northeast-1" : { }, @@ -614,6 +638,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -652,6 +677,7 @@ }, "appsync" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -664,6 +690,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -673,6 +700,7 @@ }, "athena" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -726,6 +754,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -735,6 +764,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -748,6 +778,7 @@ }, "backup" : { "endpoints" : { + "af-south-1" : { }, 
"ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -757,6 +788,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -841,15 +873,14 @@ }, "chime" : { "defaults" : { - "protocols" : [ "https" ], - "sslCommonName" : "service.chime.aws.amazon.com" + "protocols" : [ "https" ] }, "endpoints" : { "aws-global" : { "credentialScope" : { "region" : "us-east-1" }, - "hostname" : "service.chime.aws.amazon.com", + "hostname" : "chime.us-east-1.amazonaws.com", "protocols" : [ "https" ] } }, @@ -1292,8 +1323,10 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -1312,8 +1345,10 @@ }, "hostname" : "cognito-identity-fips.us-west-2.amazonaws.com" }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1326,8 +1361,10 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -1346,8 +1383,10 @@ }, "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com" }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1434,6 +1473,7 @@ }, "config" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1443,9 +1483,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "config-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "config-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "config-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "config-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1465,6 +1530,13 @@ "us-west-2" : { } } }, + "contact-lens" : { + "endpoints" : { + "ap-southeast-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "cur" : { "endpoints" : { "us-east-1" : { } @@ -1762,6 +1834,12 @@ }, "hostname" : "rds.eu-west-3.amazonaws.com" }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "rds.sa-east-1.amazonaws.com" + }, "us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -1916,6 +1994,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "ebs-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "ebs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "ebs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "ebs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : 
"ebs-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2060,6 +2168,12 @@ }, "hostname" : "fips.eks.us-east-2.amazonaws.com" }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "fips.eks.us-west-1.amazonaws.com" + }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2586,6 +2700,18 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "fms-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "fms-fips.ap-east-1.amazonaws.com" + }, "fips-ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" @@ -2628,6 +2754,12 @@ }, "hostname" : "fms-fips.eu-central-1.amazonaws.com" }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "fms-fips.eu-south-1.amazonaws.com" + }, "fips-eu-west-1" : { "credentialScope" : { "region" : "eu-west-1" @@ -2646,6 +2778,12 @@ }, "hostname" : "fms-fips.eu-west-3.amazonaws.com" }, + "fips-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "fms-fips.me-south-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -2810,6 +2948,7 @@ }, "glue" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2951,7 +3090,12 @@ }, "health" : { "endpoints" : { - "us-east-1" : { } + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "health-fips.us-east-2.amazonaws.com" + } } }, "honeycode" : { @@ -3291,6 +3435,7 @@ "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -3351,6 +3496,30 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "lakeformation-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "lakeformation-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "lakeformation-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "lakeformation-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3519,6 +3688,17 @@ "us-west-2" : { } } }, + "lookoutvision" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "machinelearning" : { "endpoints" : { "eu-west-1" : { }, @@ -4217,6 +4397,7 @@ "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, @@ -4623,6 +4804,12 @@ "region" : "us-east-1" }, "hostname" : "route53.amazonaws.com" + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "route53-fips.amazonaws.com" } }, "isRegionalized" : false, @@ -5220,6 +5407,12 @@ "eu-west-3" : { }, "me-south-1" : { }, "sa-east-1" : { }, + "servicediscovery-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com" + }, "us-east-1" : { }, 
"us-east-2" : { }, "us-west-1" : { }, @@ -5338,6 +5531,7 @@ }, "snowball" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -5575,6 +5769,12 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "ssm-fips.ca-central-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -5601,30 +5801,6 @@ }, "me-south-1" : { }, "sa-east-1" : { }, - "ssm-facade-fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "hostname" : "ssm-facade-fips.us-east-1.amazonaws.com" - }, - "ssm-facade-fips-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "hostname" : "ssm-facade-fips.us-east-2.amazonaws.com" - }, - "ssm-facade-fips-us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "hostname" : "ssm-facade-fips.us-west-1.amazonaws.com" - }, - "ssm-facade-fips-us-west-2" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "hostname" : "ssm-facade-fips.us-west-2.amazonaws.com" - }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -5961,9 +6137,14 @@ }, "transcribestreaming" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -6476,7 +6657,8 @@ }, "appsync" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "athena" : { @@ -6627,6 +6809,16 @@ "cn-northwest-1" : { } } }, + "docdb" : { + "endpoints" : { + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "rds.cn-northwest-1.amazonaws.com.cn" + } + } + }, "ds" : { "endpoints" : { "cn-north-1" : { }, @@ -6738,6 +6930,12 @@ "cn-northwest-1" : { } } }, + "fsx" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "gamelift" : { "endpoints" : { "cn-north-1" : { } @@ -6796,6 +6994,11 @@ "cn-northwest-1" : { } } }, + "iotanalytics" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "iotevents" : { "endpoints" : { "cn-north-1" : { } @@ -6841,6 +7044,11 @@ "cn-northwest-1" : { } } }, + "lakeformation" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "lambda" : { "endpoints" : { "cn-north-1" : { }, @@ -6895,12 +7103,6 @@ "region" : "cn-northwest-1" }, "hostname" : "organizations.cn-northwest-1.amazonaws.com.cn" - }, - "fips-aws-cn-global" : { - "credentialScope" : { - "region" : "cn-northwest-1" - }, - "hostname" : "organizations.cn-northwest-1.amazonaws.com.cn" } }, "isRegionalized" : false, @@ -6911,6 +7113,12 @@ "cn-northwest-1" : { } } }, + "ram" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "rds" : { "endpoints" : { "cn-north-1" : { }, @@ -6985,6 +7193,12 @@ "cn-northwest-1" : { } } }, + "securityhub" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "serverlessrepo" : { "defaults" : { "protocols" : [ "https" ] @@ -6998,6 +7212,12 @@ } } }, + "servicediscovery" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "sms" : { "endpoints" : { "cn-north-1" : { }, @@ -7168,8 +7388,18 @@ }, "acm" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "acm.us-gov-east-1.amazonaws.com" + }, + 
"us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "acm.us-gov-west-1.amazonaws.com" + } } }, "acm-pca" : { @@ -7261,8 +7491,12 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-gov-west-1" : { + "protocols" : [ "http", "https" ] + } } }, "appstream2" : { @@ -7315,8 +7549,12 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "protocols" : [ "http", "https" ] + }, + "us-gov-west-1" : { + "protocols" : [ "http", "https" ] + } } }, "backup" : { @@ -7457,6 +7695,12 @@ }, "cognito-identity" : { "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "cognito-identity-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-west-1" : { } } }, @@ -7498,6 +7742,18 @@ }, "config" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "config.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "config.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -7651,6 +7907,18 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "eks.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "eks.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -7838,6 +8106,25 @@ "protocols" : [ "https" ] }, "endpoints" : { + "dataplane-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "greengrass-ats.iot.us-gov-east-1.amazonaws.com" + }, + "dataplane-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "greengrass-ats.iot.us-gov-west-1.amazonaws.com" + }, + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "greengrass-fips.us-gov-east-1.amazonaws.com" + }, + "us-gov-east-1" : { }, "us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" @@ -7852,6 +8139,13 @@ "protocols" : [ "https" ] }, "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "guardduty.us-gov-east-1.amazonaws.com" + }, "us-gov-west-1" : { }, "us-gov-west-1-fips" : { "credentialScope" : { @@ -7965,6 +8259,17 @@ "us-gov-west-1" : { } } }, + "lakeformation" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "lakeformation-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { } + } + }, "lambda" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -8117,9 +8422,14 @@ "credentialScope" : { "region" : "us-gov-west-1" }, - "hostname" : "pinpoint.us-gov-west-1.amazonaws.com" + "hostname" : "pinpoint-fips.us-gov-west-1.amazonaws.com" }, - "us-gov-west-1" : { } + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "pinpoint.us-gov-west-1.amazonaws.com" + } } }, "polly" : { @@ -8209,6 +8519,12 @@ "region" : "us-gov-west-1" }, "hostname" : "route53.us-gov.amazonaws.com" + }, + "fips-aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + 
}, + "hostname" : "route53.us-gov.amazonaws.com" } }, "isRegionalized" : false, @@ -8442,18 +8758,6 @@ }, "hostname" : "ssm.us-gov-west-1.amazonaws.com" }, - "ssm-facade-fips-us-gov-east-1" : { - "credentialScope" : { - "region" : "us-gov-east-1" - }, - "hostname" : "ssm-facade.us-gov-east-1.amazonaws.com" - }, - "ssm-facade-fips-us-gov-west-1" : { - "credentialScope" : { - "region" : "us-gov-west-1" - }, - "hostname" : "ssm-facade.us-gov-west-1.amazonaws.com" - }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -8970,6 +9274,14 @@ "us-iso-east-1" : { } } }, + "translate" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, "workspaces" : { "endpoints" : { "us-iso-east-1" : { } @@ -8992,6 +9304,16 @@ } }, "services" : { + "api.ecr" : { + "endpoints" : { + "us-isob-east-1" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "api.ecr.us-isob-east-1.sc2s.sgov.gov" + } + } + }, "application-autoscaling" : { "defaults" : { "protocols" : [ "http", "https" ] @@ -9018,6 +9340,11 @@ "us-isob-east-1" : { } } }, + "codedeploy" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "config" : { "endpoints" : { "us-isob-east-1" : { } @@ -9055,6 +9382,11 @@ "us-isob-east-1" : { } } }, + "ecs" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "elasticache" : { "endpoints" : { "us-isob-east-1" : { } @@ -9072,6 +9404,11 @@ "us-isob-east-1" : { } } }, + "es" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "events" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 31b61bd56066..da72b9efcc75 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java index d69e55ff568d..978fe7aa8389 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/SdkPublisher.java @@ -24,11 +24,11 @@ import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.utils.async.BufferingSubscriber; -import software.amazon.awssdk.utils.async.DelegatingSubscriber; import software.amazon.awssdk.utils.async.FilteringSubscriber; import software.amazon.awssdk.utils.async.FlatteningSubscriber; import software.amazon.awssdk.utils.async.LimitingSubscriber; import software.amazon.awssdk.utils.async.SequentialSubscriber; +import software.amazon.awssdk.utils.internal.MappingSubscriber; /** * Interface that is implemented by the Async auto-paginated responses. @@ -79,12 +79,7 @@ default SdkPublisher filter(Predicate predicate) { * @return New publisher with events mapped according to the given function. 
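The map default method documented above is reimplemented below in terms of MappingSubscriber. A minimal usage sketch, assuming the caller already holds an SdkPublisher of String elements (for example from an async paginator):

    import java.util.concurrent.CompletableFuture;
    import software.amazon.awssdk.core.async.SdkPublisher;

    public final class MapUsageSketch {
        // Upper-cases every element published by the given SdkPublisher and prints it.
        // Exceptions thrown by the mapping function are delivered to onError, as the
        // MappingSubscriber-based tests later in this change verify.
        static CompletableFuture<Void> printUpperCased(SdkPublisher<String> publisher) {
            return publisher.map(String::toUpperCase)
                            .subscribe(System.out::println);
        }
    }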
*/ default SdkPublisher map(Function mapper) { - return subscriber -> subscribe(new DelegatingSubscriber(subscriber) { - @Override - public void onNext(T t) { - subscriber.onNext(mapper.apply(t)); - } - }); + return subscriber -> subscribe(MappingSubscriber.create(subscriber, mapper)); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/ExecutionInterceptorChain.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/ExecutionInterceptorChain.java index 11fa74a4ee65..e99ef4be102a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/ExecutionInterceptorChain.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/ExecutionInterceptorChain.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.function.Consumer; import org.reactivestreams.Publisher; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -28,6 +29,7 @@ import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.internal.interceptor.DefaultFailedExecutionContext; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.SdkHttpResponse; @@ -83,24 +85,13 @@ public InterceptorContext modifyHttpRequestAndHttpContent(InterceptorContext con ExecutionAttributes executionAttributes) { InterceptorContext result = context; for (ExecutionInterceptor interceptor : interceptors) { - AsyncRequestBody asyncRequestBody = interceptor.modifyAsyncHttpContent(result, executionAttributes).orElse(null); - - SdkHttpFullRequest sdkHttpFullRequest = (SdkHttpFullRequest) context.httpRequest(); - if (!result.requestBody().isPresent() && sdkHttpFullRequest.contentStreamProvider().isPresent()) { - long contentLength = Long.parseLong(sdkHttpFullRequest.firstMatchingHeader("Content-Length").orElse("0")); - String contentType = sdkHttpFullRequest.firstMatchingHeader("Content-Type").orElse(""); - RequestBody requestBody = RequestBody.fromContentProvider(sdkHttpFullRequest.contentStreamProvider().get(), - contentLength, - contentType); - result = result.toBuilder().requestBody(requestBody).build(); - } - RequestBody requestBody = interceptor.modifyHttpContent(result, executionAttributes).orElse(null); - SdkHttpRequest interceptorResult = interceptor.modifyHttpRequest(result, executionAttributes); validateInterceptorResult(result.httpRequest(), interceptorResult, interceptor, "modifyHttpRequest"); + result = applySdkHttpFullRequestHack(result); + result = result.copy(b -> b.httpRequest(interceptorResult) .asyncRequestBody(asyncRequestBody) .requestBody(requestBody)); @@ -108,6 +99,32 @@ public InterceptorContext modifyHttpRequestAndHttpContent(InterceptorContext con return result; } + private InterceptorContext applySdkHttpFullRequestHack(InterceptorContext context) { + // Someone thought it would be a great idea to allow interceptors to return SdkHttpFullRequest to modify the payload + // instead of using the modifyPayload method. This is for backwards-compatibility with those interceptors. + // TODO: Update interceptors to use the proper payload-modifying method so that this code path is only used for older + // client versions. Maybe if we ever decide to break @SdkProtectedApis (if we stop using Jackson?!) 
we can even remove + // this hack! + SdkHttpFullRequest sdkHttpFullRequest = (SdkHttpFullRequest) context.httpRequest(); + + if (context.requestBody().isPresent()) { + return context; + } + + Optional contentStreamProvider = sdkHttpFullRequest.contentStreamProvider(); + + if (!contentStreamProvider.isPresent()) { + return context; + } + + long contentLength = Long.parseLong(sdkHttpFullRequest.firstMatchingHeader("Content-Length").orElse("0")); + String contentType = sdkHttpFullRequest.firstMatchingHeader("Content-Type").orElse(""); + RequestBody requestBody = RequestBody.fromContentProvider(contentStreamProvider.get(), + contentLength, + contentType); + return context.toBuilder().requestBody(requestBody).build(); + } + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { interceptors.forEach(i -> i.beforeTransmission(context, executionAttributes)); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java index 69721a1ac4db..1c72ddcd9600 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.core.interceptor; import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; /** * Attributes that can be applied to all sdk requests. Only generated code from the SDK clients should set these values. @@ -29,6 +30,9 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { */ public static final ExecutionAttribute IS_FULL_DUPLEX = new ExecutionAttribute<>("IsFullDuplex"); + public static final ExecutionAttribute HTTP_CHECKSUM_REQUIRED = + new ExecutionAttribute<>("HttpChecksumRequired"); + private SdkInternalExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/HttpChecksumRequired.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/HttpChecksumRequired.java new file mode 100644 index 000000000000..eee5b08fadcb --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/HttpChecksumRequired.java @@ -0,0 +1,28 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
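As a companion to the backwards-compatibility hack above, here is a sketch of a hypothetical interceptor that swaps the payload through modifyHttpContent, the "proper payload-modifying method" the TODO refers to, so it never needs to return an SdkHttpFullRequest from modifyHttpRequest:

    import java.util.Optional;
    import software.amazon.awssdk.core.interceptor.Context;
    import software.amazon.awssdk.core.interceptor.ExecutionAttributes;
    import software.amazon.awssdk.core.interceptor.ExecutionInterceptor;
    import software.amazon.awssdk.core.sync.RequestBody;

    public class ReplacePayloadInterceptor implements ExecutionInterceptor {
        @Override
        public Optional<RequestBody> modifyHttpContent(Context.ModifyHttpRequest context,
                                                       ExecutionAttributes executionAttributes) {
            // Hypothetical replacement payload; returning Optional.empty() would leave the body untouched.
            return Optional.of(RequestBody.fromString("replacement-payload"));
        }
    }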
+ */ + +package software.amazon.awssdk.core.interceptor.trait; + +import software.amazon.awssdk.annotations.SdkProtectedApi; + +@SdkProtectedApi +public class HttpChecksumRequired { + private HttpChecksumRequired() { + } + + public static HttpChecksumRequired create() { + return new HttpChecksumRequired(); + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java index 26f281fa14f7..2e8523b32642 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java @@ -17,6 +17,7 @@ import java.net.URI; import java.time.Duration; +import java.util.Optional; import java.util.function.BiFunction; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.Response; @@ -35,6 +36,8 @@ import software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute; import software.amazon.awssdk.core.internal.util.MetricUtils; import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.metrics.MetricCollector; @@ -126,10 +129,30 @@ private static SdkHttpFullRequest modifyEndpointHostIfNeeded(SdkHttpFullRequest } private static void addHttpRequest(ExecutionContext executionContext, SdkHttpFullRequest request) { - InterceptorContext interceptorContext = executionContext.interceptorContext().copy(b -> b.httpRequest(request)); + InterceptorContext interceptorContext = executionContext.interceptorContext(); + + Optional contentStreamProvider = request.contentStreamProvider(); + if (contentStreamProvider.isPresent()) { + interceptorContext = interceptorContext.copy(b -> b.httpRequest(request) + .requestBody(getBody(request))); + } else { + interceptorContext = interceptorContext.copy(b -> b.httpRequest(request)); + } + executionContext.interceptorContext(interceptorContext); } + private static RequestBody getBody(SdkHttpFullRequest request) { + Optional contentStreamProvider = request.contentStreamProvider(); + if (contentStreamProvider.isPresent()) { + long contentLength = Long.parseLong(request.firstMatchingHeader("Content-Length").orElse("0")); + String contentType = request.firstMatchingHeader("Content-Type").orElse(""); + return RequestBody.fromContentProvider(contentStreamProvider.get(), contentLength, contentType); + } + + return null; + } + private static void runAfterMarshallingInterceptors(ExecutionContext executionContext) { executionContext.interceptorChain().afterMarshalling(executionContext.interceptorContext(), executionContext.executionAttributes()); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/loader/DefaultSdkAsyncHttpClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/loader/DefaultSdkAsyncHttpClientBuilder.java index f96796277658..afcdce212ae9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/loader/DefaultSdkAsyncHttpClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/loader/DefaultSdkAsyncHttpClientBuilder.java @@ -43,9 +43,9 @@ public SdkAsyncHttpClient 
buildWithDefaults(AttributeMap serviceDefaults) { .map(f -> f.buildWithDefaults(serviceDefaults)) .orElseThrow( () -> SdkClientException.builder() - .message("Unable to load an HTTP implementation from any provider in the" + - "chain. You must declare a dependency on an appropriate HTTP" + - "implementation or pass in an SdkHttpClient explicitly to the" + + .message("Unable to load an HTTP implementation from any provider in the " + + "chain. You must declare a dependency on an appropriate HTTP " + + "implementation or pass in an SdkHttpClient explicitly to the " + "client builder.") .build()); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java new file mode 100644 index 000000000000..1426e8bade1a --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/HttpChecksumRequiredInterceptor.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.interceptor; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttribute; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.Header; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.utils.Md5Utils; + +/** + * Implements the "httpChecksumRequired" C2J trait. Operations with that trait applied will automatically include a "Content-MD5" + * header, containing a checksum of the payload. + * + *

This is NOT supported for asynchronous HTTP content, which is currently only used for streaming upload operations. If such + * operations are added in the future, we'll have to find a way to support them in a non-blocking manner. That will likely require + * interface changes of some sort, because it's not currently possible to do a non-blocking update to request headers. + */ +@SdkInternalApi +public class HttpChecksumRequiredInterceptor implements ExecutionInterceptor { + private static final ExecutionAttribute CONTENT_MD5_VALUE = new ExecutionAttribute<>("ContentMd5"); + + @Override + public void afterMarshalling(Context.AfterMarshalling context, ExecutionAttributes executionAttributes) { + boolean isHttpChecksumRequired = isHttpChecksumRequired(executionAttributes); + boolean requestAlreadyHasMd5 = context.httpRequest().firstMatchingHeader(Header.CONTENT_MD5).isPresent(); + + Optional syncContent = context.requestBody(); + Optional asyncContent = context.asyncRequestBody(); + + if (!isHttpChecksumRequired || requestAlreadyHasMd5) { + return; + } + + if (asyncContent.isPresent()) { + throw new IllegalArgumentException("This operation requires a content-MD5 checksum, but one cannot be calculated " + + "for non-blocking content."); + } + + syncContent.ifPresent(requestBody -> saveContentMd5(requestBody, executionAttributes)); + } + + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { + String contentMd5 = executionAttributes.getAttribute(CONTENT_MD5_VALUE); + if (contentMd5 != null) { + return context.httpRequest().copy(r -> r.putHeader(Header.CONTENT_MD5, contentMd5)); + } + return context.httpRequest(); + } + + private boolean isHttpChecksumRequired(ExecutionAttributes executionAttributes) { + return executionAttributes.getAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED) != null; + } + + /** + * Calculates the MD5 checksum of the provided request (and base64 encodes it), storing the result in + * {@link #CONTENT_MD5_VALUE}. + * + *

Note: This assumes that the content stream provider can create multiple new streams. If it only supports one (e.g. with + * an input stream that doesn't support mark/reset), we could consider buffering the content in memory here and updating the + * request body to use that buffered content. We obviously don't want to do that for giant streams, so we haven't opted to do + * that yet. + */ + private void saveContentMd5(RequestBody requestBody, ExecutionAttributes executionAttributes) { + try { + String payloadMd5 = Md5Utils.md5AsBase64(requestBody.contentStreamProvider().newStream()); + executionAttributes.putAttribute(CONTENT_MD5_VALUE, payloadMd5); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } +} diff --git a/core/sdk-core/src/main/resources/software/amazon/awssdk/global/handlers/execution.interceptors b/core/sdk-core/src/main/resources/software/amazon/awssdk/global/handlers/execution.interceptors new file mode 100644 index 000000000000..886f98df1c68 --- /dev/null +++ b/core/sdk-core/src/main/resources/software/amazon/awssdk/global/handlers/execution.interceptors @@ -0,0 +1 @@ +software.amazon.awssdk.core.internal.interceptor.HttpChecksumRequiredInterceptor \ No newline at end of file diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java index 95d4d36bae09..b7c88409e4a7 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/SdkPublishersTest.java @@ -26,9 +26,9 @@ import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; - import software.amazon.awssdk.core.internal.async.SdkPublishers; import utils.FakePublisher; +import utils.FakeSdkPublisher; public class SdkPublishersTest { @Test @@ -45,6 +45,45 @@ public void envelopeWrappedPublisher() { assertThat(fakeSubscriber.recordedEvents()).containsExactly("prefix:content", ":suffix"); } + @Test + public void mapTransformsCorrectly() { + FakeSdkPublisher fakePublisher = new FakeSdkPublisher<>(); + FakeStringSubscriber fakeSubscriber = new FakeStringSubscriber(); + fakePublisher.map(String::toUpperCase).subscribe(fakeSubscriber); + + fakePublisher.publish("one"); + fakePublisher.publish("two"); + fakePublisher.complete(); + + assertThat(fakeSubscriber.recordedEvents()).containsExactly("ONE", "TWO"); + assertThat(fakeSubscriber.isComplete()).isTrue(); + assertThat(fakeSubscriber.isError()).isFalse(); + } + + @Test + public void mapHandlesError() { + FakeSdkPublisher fakePublisher = new FakeSdkPublisher<>(); + FakeStringSubscriber fakeSubscriber = new FakeStringSubscriber(); + RuntimeException exception = new IllegalArgumentException("Twos are not supported"); + + fakePublisher.map(s -> { + if ("two".equals(s)) { + throw exception; + } + + return s.toUpperCase(); + }).subscribe(fakeSubscriber); + + fakePublisher.publish("one"); + fakePublisher.publish("two"); + fakePublisher.publish("three"); + + assertThat(fakeSubscriber.recordedEvents()).containsExactly("ONE"); + assertThat(fakeSubscriber.isComplete()).isFalse(); + assertThat(fakeSubscriber.isError()).isTrue(); + assertThat(fakeSubscriber.recordedErrors()).containsExactly(exception); + } + private final static class FakeByteBufferSubscriber implements Subscriber { private final List recordedEvents = new ArrayList<>(); @@ -73,4 +112,48 @@ public List recordedEvents() { 
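For the HttpChecksumRequiredInterceptor added above (and registered through the execution.interceptors file), this sketch reproduces the Content-MD5 value calculation it performs, using a small in-memory RequestBody purely for illustration:

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.http.Header;
    import software.amazon.awssdk.utils.Md5Utils;

    public final class ContentMd5Sketch {
        public static void main(String[] args) {
            RequestBody body = RequestBody.fromString("hello world");
            try {
                // MD5 of the payload, base64-encoded: the value that ends up in the Content-MD5 header.
                String contentMd5 = Md5Utils.md5AsBase64(body.contentStreamProvider().newStream());
                System.out.println(Header.CONTENT_MD5 + ": " + contentMd5);
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
    }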
return this.recordedEvents; } } + + private final static class FakeStringSubscriber implements Subscriber { + private final List recordedEvents = new ArrayList<>(); + private final List recordedErrors = new ArrayList<>(); + private boolean isComplete = false; + private boolean isError = false; + + @Override + public void onSubscribe(Subscription s) { + s.request(1000); + } + + @Override + public void onNext(String s) { + recordedEvents.add(s); + } + + @Override + public void onError(Throwable t) { + recordedErrors.add(t); + this.isError = true; + } + + @Override + public void onComplete() { + this.isComplete = true; + } + + public List recordedEvents() { + return this.recordedEvents; + } + + public List recordedErrors() { + return this.recordedErrors; + } + + public boolean isComplete() { + return isComplete; + } + + public boolean isError() { + return isError; + } + } } \ No newline at end of file diff --git a/core/sdk-core/src/test/java/utils/EmptySubscriptionTest.java b/core/sdk-core/src/test/java/utils/EmptySubscriptionTest.java new file mode 100644 index 000000000000..f2c9ef74030a --- /dev/null +++ b/core/sdk-core/src/test/java/utils/EmptySubscriptionTest.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package utils; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.utils.internal.async.EmptySubscription; + +import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +@RunWith(MockitoJUnitRunner.class) +public class EmptySubscriptionTest { + + @Mock + private Subscriber mockSubscriber; + + @Test + public void emptySubscription_with_invalid_request() { + EmptySubscription emptySubscription = new EmptySubscription(mockSubscriber); + assertThatIllegalArgumentException().isThrownBy(() -> emptySubscription.request(-1)); + } + + @Test + public void emptySubscription_with_normal_execution() { + EmptySubscription emptySubscription = new EmptySubscription(mockSubscriber); + emptySubscription.request(1); + verify(mockSubscriber).onComplete(); + } + + @Test + public void emptySubscription_when_terminated_externally() { + EmptySubscription emptySubscription = new EmptySubscription(mockSubscriber); + emptySubscription.cancel(); + emptySubscription.request(1); + verify(mockSubscriber, never()).onComplete(); + } +} diff --git a/core/sdk-core/src/test/java/utils/FakeSdkPublisher.java b/core/sdk-core/src/test/java/utils/FakeSdkPublisher.java new file mode 100644 index 000000000000..2df31b0a1d15 --- /dev/null +++ b/core/sdk-core/src/test/java/utils/FakeSdkPublisher.java @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package utils; + +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.async.SdkPublisher; + +public class FakeSdkPublisher implements SdkPublisher { + private Subscriber delegateSubscriber; + + @Override + public void subscribe(Subscriber subscriber) { + this.delegateSubscriber = subscriber; + this.delegateSubscriber.onSubscribe(new FakeSubscription()); + } + + public void publish(T str) { + this.delegateSubscriber.onNext(str); + } + + public void complete() { + this.delegateSubscriber.onComplete(); + } + + public void doThrow(Throwable t) { + this.delegateSubscriber.onError(t); + } + + private static final class FakeSubscription implements Subscription { + @Override + public void request(long n) { + + } + + @Override + public void cancel() { + + } + } +} diff --git a/core/sdk-core/src/test/java/utils/SdkSubscriberTest.java b/core/sdk-core/src/test/java/utils/SdkSubscriberTest.java new file mode 100644 index 000000000000..34fb7f415cb3 --- /dev/null +++ b/core/sdk-core/src/test/java/utils/SdkSubscriberTest.java @@ -0,0 +1,160 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package utils; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.core.pagination.async.AsyncPageFetcher; +import software.amazon.awssdk.core.pagination.async.PaginatedItemsPublisher; +import software.amazon.awssdk.utils.async.LimitingSubscriber; +import software.amazon.awssdk.utils.internal.async.EmptySubscription; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Function; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.*; + +@RunWith(MockitoJUnitRunner.class) +public class SdkSubscriberTest { + + public static final Function> SAMPLE_ITERATOR = response -> Arrays.asList(1, 2, 3, 4, 5, 6).listIterator(); + public static final Function> EMPTY_ITERATOR = response -> new ArrayList().listIterator(); + @Mock + AsyncPageFetcher asyncPageFetcher; + PaginatedItemsPublisher itemsPublisher; + + @Mock + Subscriber mockSubscriber; + + @Before + public void setUp() { + doReturn(CompletableFuture.completedFuture(1)) + .when(asyncPageFetcher).nextPage(null); + doReturn(false) + .when(asyncPageFetcher).hasNextPage(anyObject()); + } + + @Test + public void limitingSubscriber_with_different_limits() throws InterruptedException, ExecutionException, TimeoutException { + itemsPublisher = PaginatedItemsPublisher.builder().nextPageFetcher(asyncPageFetcher) + .iteratorFunction(SAMPLE_ITERATOR).isLastPage(false).build(); + + final List belowLimit = new ArrayList<>(); + itemsPublisher.limit(3).subscribe(e -> belowLimit.add(e)).get(5, TimeUnit.SECONDS); + assertThat(belowLimit).isEqualTo(Arrays.asList(1, 2, 3)); + + final List beyondLimit = new ArrayList<>(); + itemsPublisher.limit(33).subscribe(e -> beyondLimit.add(e)).get(5, TimeUnit.SECONDS); + assertThat(beyondLimit).isEqualTo(Arrays.asList(1, 2, 3, 4, 5, 6)); + + final List zeroLimit = new ArrayList<>(); + itemsPublisher.limit(0).subscribe(e -> zeroLimit.add(e)).get(5, TimeUnit.SECONDS); + assertThat(zeroLimit).isEqualTo(Arrays.asList()); + } + + @Test + public void filteringSubscriber_with_different_filters() throws InterruptedException, ExecutionException, TimeoutException { + itemsPublisher = PaginatedItemsPublisher.builder().nextPageFetcher(asyncPageFetcher) + .iteratorFunction(SAMPLE_ITERATOR).isLastPage(false).build(); + + final List filteredSomeList = new ArrayList<>(); + itemsPublisher.filter(i -> i % 2 == 0).subscribe(e -> filteredSomeList.add(e)).get(5, TimeUnit.SECONDS); + assertThat(filteredSomeList).isEqualTo(Arrays.asList(2, 4, 6)); + + final List filteredAllList = new ArrayList<>(); + itemsPublisher.filter(i -> i % 10 == 0).subscribe(e -> filteredAllList.add(e)).get(5, TimeUnit.SECONDS); + assertThat(filteredAllList).isEqualTo(Arrays.asList()); + + final List filteredNone = new ArrayList<>(); + itemsPublisher.filter(i -> i % 1 == 0).subscribe(e -> filteredNone.add(e)).get(5, TimeUnit.SECONDS); + assertThat(filteredNone).isEqualTo(Arrays.asList(1, 2, 3, 4, 5, 6)); + + } + + @Test + public 
void limit_and_filter_subscriber_chained_with_different_conditions() throws InterruptedException, ExecutionException, TimeoutException { + itemsPublisher = PaginatedItemsPublisher.builder().nextPageFetcher(asyncPageFetcher) + .iteratorFunction(SAMPLE_ITERATOR).isLastPage(false).build(); + + final List belowLimitWithFiltering = new ArrayList<>(); + itemsPublisher.limit(4).filter(i -> i % 2 == 0).subscribe(e -> belowLimitWithFiltering.add(e)).get(5, TimeUnit.SECONDS); + assertThat(belowLimitWithFiltering).isEqualTo(Arrays.asList(2, 4)); + + final List beyondLimitWithAllFiltering = new ArrayList<>(); + itemsPublisher.limit(33).filter(i -> i % 10 == 0).subscribe(e -> beyondLimitWithAllFiltering.add(e)).get(5, TimeUnit.SECONDS); + assertThat(beyondLimitWithAllFiltering).isEqualTo(Arrays.asList()); + + final List zeroLimitAndNoFilter = new ArrayList<>(); + itemsPublisher.limit(0).filter(i -> i % 1 == 0).subscribe(e -> zeroLimitAndNoFilter.add(e)).get(5, TimeUnit.SECONDS); + assertThat(zeroLimitAndNoFilter).isEqualTo(Arrays.asList()); + + final List filteringbelowLimitWith = new ArrayList<>(); + itemsPublisher.filter(i -> i % 2 == 0).limit(2).subscribe(e -> filteringbelowLimitWith.add(e)).get(5, TimeUnit.SECONDS); + assertThat(filteringbelowLimitWith).isEqualTo(Arrays.asList(2, 4)); + + final List filteringAndOutsideLimit = new ArrayList<>(); + itemsPublisher.filter(i -> i % 10 == 0).limit(33).subscribe(e -> filteringAndOutsideLimit.add(e)).get(5, TimeUnit.SECONDS); + assertThat(filteringAndOutsideLimit).isEqualTo(Arrays.asList()); + } + + @Test + public void limit__subscriber_with_empty_input_and_zero_limit() throws InterruptedException, ExecutionException, TimeoutException { + itemsPublisher = PaginatedItemsPublisher.builder().nextPageFetcher(asyncPageFetcher) + .iteratorFunction(EMPTY_ITERATOR).isLastPage(false).build(); + + final List zeroLimit = new ArrayList<>(); + itemsPublisher.limit(0).subscribe(e -> zeroLimit.add(e)).get(5, TimeUnit.SECONDS); + assertThat(zeroLimit).isEqualTo(Arrays.asList()); + + final List nonZeroLimit = new ArrayList<>(); + itemsPublisher.limit(10).subscribe(e -> nonZeroLimit.add(e)).get(5, TimeUnit.SECONDS); + assertThat(zeroLimit).isEqualTo(Arrays.asList()); + } + + + @Test + public void limiting_subscriber_with_multiple_thread_publishers() throws InterruptedException { + final int limitFactor = 5; + LimitingSubscriber limitingSubscriber = new LimitingSubscriber<>(mockSubscriber, limitFactor); + limitingSubscriber.onSubscribe(new EmptySubscription(mockSubscriber)); + final ExecutorService executorService = Executors.newFixedThreadPool(10); + for (int i = 0; i < 10; i++) { + final Integer integer = Integer.valueOf(i); + executorService.submit(() -> limitingSubscriber.onNext(new Integer(integer))); + } + executorService.awaitTermination(300, TimeUnit.MILLISECONDS); + Mockito.verify(mockSubscriber, times(limitFactor)).onNext(anyInt()); + Mockito.verify(mockSubscriber).onComplete(); + Mockito.verify(mockSubscriber).onSubscribe(anyObject()); + Mockito.verify(mockSubscriber, never()).onError(anyObject()); + } +} diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index d9ba9dc95fac..f36fbae800b4 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpFullRequest.java 
b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpFullRequest.java index 474a6b49a8cf..b4e945e17327 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpFullRequest.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpFullRequest.java @@ -57,18 +57,24 @@ static SdkHttpFullRequest.Builder builder() { */ interface Builder extends SdkHttpRequest.Builder { /** - * Convenience method to set the {@link #protocol()}, {@link #host()}, {@link #port()}, and - * {@link #encodedPath()} from a {@link URI} object. + * Convenience method to set the {@link #protocol()}, {@link #host()}, {@link #port()}, + * {@link #encodedPath()} and extracts query parameters from a {@link URI} object. * * @param uri URI containing protocol, host, port and path. * @return This builder for method chaining. */ @Override default Builder uri(URI uri) { - return this.protocol(uri.getScheme()) + Builder builder = this.protocol(uri.getScheme()) .host(uri.getHost()) .port(uri.getPort()) .encodedPath(SdkHttpUtils.appendUri(uri.getRawPath(), encodedPath())); + if (uri.getRawQuery() != null) { + builder.clearQueryParameters(); + SdkHttpUtils.uriParams(uri) + .forEach(this::putRawQueryParameter); + } + return builder; } /** diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java index d49f824d3a9b..088d9cd2c69e 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java @@ -135,17 +135,23 @@ default URI getUri() { */ interface Builder extends CopyableBuilder { /** - * Convenience method to set the {@link #protocol()}, {@link #host()}, {@link #port()}, and - * {@link #encodedPath()} from a {@link URI} object. + * Convenience method to set the {@link #protocol()}, {@link #host()}, {@link #port()}, + * {@link #encodedPath()} and extracts query parameters from a {@link URI} object. * * @param uri URI containing protocol, host, port and path. * @return This builder for method chaining. 
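Both builder changes in this hunk (SdkHttpFullRequest.Builder above and SdkHttpRequest.Builder just below) mean a URI's query string now survives the uri(...) call instead of being dropped. A small sketch against a hypothetical endpoint:

    import java.net.URI;
    import software.amazon.awssdk.http.SdkHttpMethod;
    import software.amazon.awssdk.http.SdkHttpRequest;

    public final class UriQueryParamSketch {
        public static void main(String[] args) {
            // Hypothetical callback-style URI that already carries query parameters.
            URI uri = URI.create("https://example.com/callback?state=abc&code=123");

            SdkHttpRequest request = SdkHttpRequest.builder()
                                                   .method(SdkHttpMethod.GET)
                                                   .uri(uri)
                                                   .build();

            // With this change the parameters are retained: prints .../callback?state=abc&code=123
            System.out.println(request.getUri());
        }
    }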
*/ default Builder uri(URI uri) { - return this.protocol(uri.getScheme()) + Builder builder = this.protocol(uri.getScheme()) .host(uri.getHost()) .port(uri.getPort()) .encodedPath(SdkHttpUtils.appendUri(uri.getRawPath(), encodedPath())); + if (uri.getRawQuery() != null) { + builder.clearQueryParameters(); + SdkHttpUtils.uriParams(uri) + .forEach(this::putRawQueryParameter); + } + return builder; } /** diff --git a/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java b/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java index 3848b6407b78..875e664d4303 100644 --- a/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java +++ b/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java @@ -22,9 +22,10 @@ import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import java.net.URI; +import java.net.URISyntaxException; import java.util.AbstractMap; import java.util.Arrays; -import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -401,13 +402,74 @@ public Map> getMap() { }); } + @Test + public void testSdkHttpFullRequestBuilderNoQueryParams() { + URI uri = URI.create("https://github.com/aws/aws-sdk-java-v2/issues/2034"); + final SdkHttpFullRequest sdkHttpFullRequest = SdkHttpFullRequest.builder().method(SdkHttpMethod.POST).uri(uri).build(); + assertThat(sdkHttpFullRequest.getUri().getQuery()).isNullOrEmpty(); + } + + @Test + public void testSdkHttpFullRequestBuilderUriWithQueryParams() { + URI uri = URI.create("https://github.com/aws/aws-sdk-java-v2/issues/2034?reqParam=1234&oParam=3456%26reqParam%3D5678"); + final SdkHttpFullRequest sdkHttpFullRequest = + SdkHttpFullRequest.builder().method(SdkHttpMethod.POST).uri(uri).build(); + assertThat(sdkHttpFullRequest.getUri().getQuery()).contains("reqParam=1234", "oParam=3456&reqParam=5678"); + } + + @Test + public void testSdkHttpFullRequestBuilderUriWithQueryParamWithoutValue() { + final String expected = "https://github.com/aws/aws-sdk-for-java-v2?foo"; + URI myUri = URI.create(expected); + SdkHttpFullRequest actual = SdkHttpFullRequest.builder().method(SdkHttpMethod.POST).uri(myUri).build(); + assertThat(actual.getUri()).hasToString(expected); + } + + @Test + public void testSdkHttpRequestBuilderNoQueryParams() { + URI uri = URI.create("https://github.com/aws/aws-sdk-java-v2/issues/2034"); + final SdkHttpRequest sdkHttpRequest = SdkHttpRequest.builder().method(SdkHttpMethod.POST).uri(uri).build(); + assertThat(sdkHttpRequest.getUri().getQuery()).isNullOrEmpty(); + } + + @Test + public void testSdkHttpRequestBuilderUriWithQueryParams() { + URI uri = URI.create("https://github.com/aws/aws-sdk-java-v2/issues/2034?reqParam=1234&oParam=3456%26reqParam%3D5678"); + final SdkHttpRequest sdkHttpRequest = + SdkHttpRequest.builder().method(SdkHttpMethod.POST).uri(uri).build(); + assertThat(sdkHttpRequest.getUri().getQuery()).contains("reqParam=1234", "oParam=3456&reqParam=5678"); + } + + @Test + public void testSdkHttpRequestBuilderUriWithQueryParamsIgnoreOtherQueryParams() { + URI uri = URI.create("https://github.com/aws/aws-sdk-java-v2/issues/2034?reqParam=1234&oParam=3456%26reqParam%3D5678"); + final SdkHttpRequest sdkHttpRequest = + SdkHttpRequest.builder().method(SdkHttpMethod.POST).appendRawQueryParameter("clean", 
"up").uri(uri).build(); + assertThat(sdkHttpRequest.getUri().getQuery()).contains("reqParam=1234", "oParam=3456&reqParam=5678") + .doesNotContain("clean"); + } + + @Test + public void testSdkHttpRequestBuilderUriWithQueryParamWithoutValue() { + final String expected = "https://github.com/aws/aws-sdk-for-java-v2?foo"; + URI myUri = URI.create(expected); + SdkHttpRequest actual = SdkHttpRequest.builder().method(SdkHttpMethod.POST).uri(myUri).build(); + assertThat(actual.getUri()).hasToString(expected); + } + private interface BuilderProxy { BuilderProxy setValue(String key, String value); + BuilderProxy appendValue(String key, String value); + BuilderProxy setValues(String key, List values); + BuilderProxy removeValue(String key); + BuilderProxy clearValues(); + BuilderProxy setMap(Map> map); + Map> getMap(); } diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 8ce5d46513bb..3205a3ef1d13 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index f5388e645d3f..e23cde44e602 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index 90df8ad1e13f..aa1fba8633de 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java index ba29a9872e83..2893e6016971 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelPipelineInitializer.java @@ -19,6 +19,7 @@ import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_INITIAL_WINDOW_SIZE; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.HTTP2_CONNECTION_PING_TIMEOUT_SECONDS; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.newSslHandler; import static software.amazon.awssdk.utils.NumericUtils.saturatedCast; import static software.amazon.awssdk.utils.StringUtils.lowerCase; @@ -44,8 +45,6 @@ import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicReference; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLParameters; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.nio.netty.internal.http2.Http2GoAwayEventListener; @@ -93,10 +92,7 @@ public void channelCreated(Channel ch) { ChannelPipeline pipeline = ch.pipeline(); if (sslCtx != null) { - // Need to provide host and port to enable SNI - // 
https://github.com/netty/netty/issues/3801#issuecomment-104274440 - SslHandler sslHandler = sslCtx.newHandler(ch.alloc(), poolKey.getHost(), poolKey.getPort()); - configureSslEngine(sslHandler.engine()); + SslHandler sslHandler = newSslHandler(sslCtx, ch.alloc(), poolKey.getHost(), poolKey.getPort()); pipeline.addLast(sslHandler); pipeline.addLast(SslCloseCompletionEventHandler.getInstance()); @@ -134,20 +130,6 @@ public void channelCreated(Channel ch) { pipeline.addLast(new LoggingHandler(LogLevel.DEBUG)); } - /** - * Enable HostName verification. - * - * See https://netty.io/4.0/api/io/netty/handler/ssl/SslContext.html#newHandler-io.netty.buffer.ByteBufAllocator-java.lang - * .String-int- - * - * @param sslEngine the sslEngine to configure - */ - private void configureSslEngine(SSLEngine sslEngine) { - SSLParameters sslParameters = sslEngine.getSSLParameters(); - sslParameters.setEndpointIdentificationAlgorithm("HTTPS"); - sslEngine.setSSLParameters(sslParameters); - } - private void configureHttp2(Channel ch, ChannelPipeline pipeline) { // Using Http2FrameCodecBuilder and Http2MultiplexHandler based on 4.1.37 release notes // https://netty.io/news/2019/06/28/4-1-37-Final.html diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java index f687f7052c3d..804b65ef3c81 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.http.nio.netty.internal; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.newSslHandler; + import io.netty.buffer.ByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; @@ -148,7 +150,7 @@ private SslHandler createSslHandlerIfNeeded(ByteBufAllocator alloc) { return null; } - return sslContext.newHandler(alloc, proxyAddress.getHost(), proxyAddress.getPort()); + return newSslHandler(sslContext, alloc, proxyAddress.getHost(), proxyAddress.getPort()); } private static boolean isTunnelEstablished(Channel ch) { diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java index 8019d0816a93..c4745d8b1031 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java @@ -38,6 +38,7 @@ import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.WriteTimeoutException; +import io.netty.util.ReferenceCountUtil; import java.io.IOException; import java.nio.ByteBuffer; import java.util.List; @@ -251,6 +252,7 @@ private void onCancel() { public void onNext(HttpContent httpContent) { // isDone may be true if the subscriber cancelled if (isDone.get()) { + ReferenceCountUtil.release(httpContent); return; } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java 
b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java index 584bdc635f8e..60b4d31c0e6b 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java @@ -15,7 +15,10 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; +import io.netty.buffer.ByteBufAllocator; import io.netty.channel.EventLoop; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslHandler; import io.netty.util.AttributeKey; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.Future; @@ -25,6 +28,8 @@ import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; import java.util.function.Function; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.utils.Logger; @@ -173,4 +178,29 @@ public static AttributeKey getOrCreateAttributeKey(String attr) { return AttributeKey.newInstance(attr); //CHECKSTYLE:ON } + + /** + * @return a new {@link SslHandler} with ssl engine configured + */ + public static SslHandler newSslHandler(SslContext sslContext, ByteBufAllocator alloc, String peerHost, int peerPort) { + // Need to provide host and port to enable SNI + // https://github.com/netty/netty/issues/3801#issuecomment-104274440 + SslHandler sslHandler = sslContext.newHandler(alloc, peerHost, peerPort); + configureSslEngine(sslHandler.engine()); + return sslHandler; + } + + /** + * Enable Hostname verification. + * + * See https://netty.io/4.0/api/io/netty/handler/ssl/SslContext.html#newHandler-io.netty.buffer.ByteBufAllocator-java.lang + * .String-int- + * + * @param sslEngine the sslEngine to configure + */ + private static void configureSslEngine(SSLEngine sslEngine) { + SSLParameters sslParameters = sslEngine.getSSLParameters(); + sslParameters.setEndpointIdentificationAlgorithm("HTTPS"); + sslEngine.setSSLParameters(sslParameters); + } } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java index 936a056425f5..8498e838171f 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java @@ -22,11 +22,14 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + import com.github.tomakehurst.wiremock.WireMockServer; import com.github.tomakehurst.wiremock.core.WireMockConfiguration; import java.io.IOException; +import org.hamcrest.CoreMatchers; import org.junit.After; import org.junit.AfterClass; +import org.junit.Assume; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -136,6 +139,8 @@ public void proxyRequest_ableToAuthenticate() { @Test public void proxyRequest_noKeyManagerGiven_notAbleToSendConnect() throws Throwable { + // TODO: remove this and fix the issue with TLS1.3 + Assume.assumeThat(System.getProperty("java.version"), CoreMatchers.startsWith("1.8")); 
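The newSslHandler helper introduced above routes every client TLS connection through the same SNI and hostname-verification setup. For context, the snippet below is a standalone JSSE sketch (plain javax.net.ssl, not SDK code; the peer host and port are made up) of the endpoint-identification switch that configureSslEngine flips:

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;

public final class EndpointIdentificationSketch {
    public static void main(String[] args) throws Exception {
        // Creating the engine with a peer host and port lets JSSE use SNI, mirroring
        // why newSslHandler passes the pool key's host and port through to Netty.
        SSLEngine engine = SSLContext.getDefault().createSSLEngine("example.com", 443);

        // Same setting as NettyUtils.configureSslEngine: "HTTPS" enables hostname
        // verification of the server certificate during the TLS handshake.
        SSLParameters parameters = engine.getSSLParameters();
        parameters.setEndpointIdentificationAlgorithm("HTTPS");
        engine.setSSLParameters(parameters);
    }
}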
thrown.expectCause(instanceOf(IOException.class)); thrown.expectMessage("Unable to send CONNECT request to proxy"); diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPoolTest.java index 3789749730bc..12100fcf8acc 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPoolTest.java @@ -45,6 +45,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; import javax.net.ssl.SSLSessionContext; import org.junit.AfterClass; import org.junit.Before; @@ -180,6 +181,9 @@ public void acquireFromDelegatePoolFails_failsFuture() { @Test public void sslContextProvided_andProxyUsingHttps_addsSslHandler() { SslHandler mockSslHandler = mock(SslHandler.class); + SSLEngine mockSslEngine = mock(SSLEngine.class); + when(mockSslHandler.engine()).thenReturn(mockSslEngine); + when(mockSslEngine.getSSLParameters()).thenReturn(mock(SSLParameters.class)); TestSslContext mockSslCtx = new TestSslContext(mockSslHandler); Http1TunnelConnectionPool.InitHandlerSupplier supplier = (srcPool, remoteAddr, initFuture) -> { diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java index bed4a8063a65..62a639c727d2 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java @@ -20,6 +20,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.EXECUTE_FUTURE_KEY; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; @@ -27,6 +28,7 @@ import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.EmptyByteBuf; +import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.EventLoopGroup; import io.netty.handler.codec.http.DefaultHttpContent; @@ -43,6 +45,7 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; +import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.http.Protocol; @@ -155,6 +158,75 @@ public void errorOccurred_shouldInvokeResponseHandler() { verify(responseHandler).onError(exception); } + @Test + public void subscriptionCancelled_upstreamPublisherCallsOnNext_httpContentReleased() { + HttpContent firstContent = mock(HttpContent.class); + when(firstContent.content()).thenReturn(Unpooled.EMPTY_BUFFER); + + HttpContent[] contentToIgnore = new HttpContent[8]; + for (int i = 0; i < contentToIgnore.length; ++i) { + contentToIgnore[i] = mock(HttpContent.class); + 
when(contentToIgnore[i].content()).thenReturn(Unpooled.EMPTY_BUFFER); + } + + Publisher publisher = subscriber -> subscriber.onSubscribe(new Subscription() { + @Override + public void request(long l) { + // We ignore any cancel signal and just publish all the content + subscriber.onNext(firstContent); + + for (int i = 0; i < l && i < contentToIgnore.length; ++i) { + subscriber.onNext(contentToIgnore[i]); + } + } + + @Override + public void cancel() { + // no-op + } + }); + + DefaultStreamedHttpResponse streamedResponse = new DefaultStreamedHttpResponse(HttpVersion.HTTP_1_1, + HttpResponseStatus.OK, publisher); + + Subscriber subscriber = new Subscriber() { + private Subscription subscription; + + @Override + public void onSubscribe(Subscription subscription) { + this.subscription = subscription; + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + subscription.cancel(); + } + + @Override + public void onError(Throwable throwable) { + } + + @Override + public void onComplete() { + } + }; + + ResponseHandler.PublisherAdapter publisherAdapter = new ResponseHandler.PublisherAdapter(streamedResponse, ctx, + requestContext, executeFuture); + + publisherAdapter.subscribe(subscriber); + + // First one should be accessed as normal + verify(firstContent).content(); + verify(firstContent).release(); + + for (int i = 0; i < contentToIgnore.length; ++i) { + verify(contentToIgnore[i]).release(); + verifyNoMoreInteractions(contentToIgnore[i]); + } + } + static final class TestSubscriber implements Subscriber { private Subscription subscription; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java index 680057886174..d476e6b37c2f 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java @@ -17,8 +17,14 @@ import static org.assertj.core.api.Assertions.assertThat; +import io.netty.channel.Channel; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslHandler; import io.netty.util.AttributeKey; +import javax.net.ssl.SSLEngine; import org.junit.Test; +import software.amazon.awssdk.http.nio.netty.internal.MockChannel; public class NettyUtilsTest { @Test @@ -27,4 +33,21 @@ public void testGetOrCreateAttributeKey_calledTwiceWithSameName_returnsSameInsta AttributeKey fooAttr = NettyUtils.getOrCreateAttributeKey(attr); assertThat(NettyUtils.getOrCreateAttributeKey(attr)).isSameAs(fooAttr); } + + @Test + public void newSslHandler_sslEngineShouldBeConfigured() throws Exception { + SslContext sslContext = SslContextBuilder.forClient().build(); + Channel channel = null; + try { + channel = new MockChannel(); + SslHandler sslHandler = NettyUtils.newSslHandler(sslContext, channel.alloc(), "localhost", 80); + SSLEngine engine = sslHandler.engine(); + assertThat(engine.getSSLParameters().getEndpointIdentificationAlgorithm()).isEqualTo("HTTPS"); + } finally { + if (channel != null) { + channel.close(); + } + } + + } } diff --git a/http-clients/pom.xml b/http-clients/pom.xml index 09be585f954a..07b259995857 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom 
software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index 56d270086b17..e79fe478d143 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index 2e1abab0f0f0..060dd6153719 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index aae0720a20f6..5a054b3ae939 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index 25ad3eb83a6c..5c68f33a7406 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -95,12 +95,12 @@ 2.5 3.1.13 - - 4.1.46.Final + + 4.1.53.Final 3.3 1.3 UTF-8 - 3.1.11 + 4.1.4 2.0.4 2.3.24-incubating 1.13.0 @@ -122,7 +122,7 @@ 1.1 7.1.0 2.3 - 2.0.29.Final + 2.0.34.Final 1.11.477 1.0.392 @@ -152,7 +152,7 @@ 1.8.2 1.8 - 4.5.9 + 4.5.13 4.4.11 @@ -286,6 +286,25 @@ + + add-license-notice + generate-sources + + add-resource + + + + + ${maven.multiModuleProjectDirectory} + + LICENSE.txt + NOTICE.txt + + META-INF + + + + diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 4caf6f4a626e..10bee1dbc1e7 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../pom.xml release-scripts diff --git a/release-scripts/src/main/java/software/amazon/awssdk/release/CreateNewServiceModuleMain.java b/release-scripts/src/main/java/software/amazon/awssdk/release/CreateNewServiceModuleMain.java new file mode 100644 index 000000000000..845a78ec169a --- /dev/null +++ b/release-scripts/src/main/java/software/amazon/awssdk/release/CreateNewServiceModuleMain.java @@ -0,0 +1,145 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.release; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.internal.CodegenNamingUtils; + +/** + * A command line application to create a new, empty service. This *does not* add the new service to the shared pom.xmls, that + * should be done via {@link FinalizeNewServiceModuleMain}. + * + * Example usage: + *

+ * mvn exec:java -pl :release-scripts \
+ *     -Dexec.mainClass="software.amazon.awssdk.release.CreateNewServiceModuleMain" \
+ *     -Dexec.args="--maven-project-root /path/to/root
+ *                  --maven-project-version 2.1.4-SNAPSHOT
+ *                  --service-id 'Service Id'
+ *                  --service-module-name service-module-name
+ *                  --service-protocol json"
+ * 
+ */ +public class CreateNewServiceModuleMain extends Cli { + private CreateNewServiceModuleMain() { + super(requiredOption("service-module-name", "The name of the service module to be created."), + requiredOption("service-id", "The service ID of the service module to be created."), + requiredOption("service-protocol", "The protocol of the service module to be created."), + requiredOption("maven-project-root", "The root directory for the maven project."), + requiredOption("maven-project-version", "The maven version of the service module to be created.")); + } + + public static void main(String[] args) { + new CreateNewServiceModuleMain().run(args); + } + + @Override + protected void run(CommandLine commandLine) throws Exception { + new NewServiceCreator(commandLine).run(); + } + + private static class NewServiceCreator { + private final Path mavenProjectRoot; + private final String mavenProjectVersion; + private final String serviceModuleName; + private final String serviceId; + private final String serviceProtocol; + + private NewServiceCreator(CommandLine commandLine) { + this.mavenProjectRoot = Paths.get(commandLine.getOptionValue("maven-project-root").trim()); + this.mavenProjectVersion = commandLine.getOptionValue("maven-project-version").trim(); + this.serviceModuleName = commandLine.getOptionValue("service-module-name").trim(); + this.serviceId = commandLine.getOptionValue("service-id").trim(); + this.serviceProtocol = transformSpecialProtocols(commandLine.getOptionValue("service-protocol").trim()); + + Validate.isTrue(Files.exists(mavenProjectRoot), "Project root does not exist: " + mavenProjectRoot); + } + + private String transformSpecialProtocols(String protocol) { + switch (protocol) { + case "ec2": return "query"; + case "rest-xml": return "xml"; + case "rest-json": return "json"; + default: return protocol; + } + } + + public void run() throws Exception { + Path servicesRoot = mavenProjectRoot.resolve("services"); + Path templateModulePath = servicesRoot.resolve("new-service-template"); + Path newServiceModulePath = servicesRoot.resolve(serviceModuleName); + + createNewModuleFromTemplate(templateModulePath, newServiceModulePath); + replaceTemplatePlaceholders(newServiceModulePath); + } + + private void createNewModuleFromTemplate(Path templateModulePath, Path newServiceModule) throws IOException { + FileUtils.copyDirectory(templateModulePath.toFile(), newServiceModule.toFile()); + } + + private void replaceTemplatePlaceholders(Path newServiceModule) throws IOException { + Files.walkFileTree(newServiceModule, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + replacePlaceholdersInFile(file); + return FileVisitResult.CONTINUE; + } + }); + } + + private void replacePlaceholdersInFile(Path file) throws IOException { + String fileContents = new String(Files.readAllBytes(file), UTF_8); + String newFileContents = replacePlaceholders(fileContents); + Files.write(file, newFileContents.getBytes(UTF_8)); + } + + private String replacePlaceholders(String line) { + String[] searchList = { + "{{MVN_ARTIFACT_ID}}", + "{{MVN_NAME}}", + "{{MVN_VERSION}}", + "{{PROTOCOL}}" + }; + String[] replaceList = { + serviceModuleName, + mavenName(serviceId), + mavenProjectVersion, + serviceProtocol + }; + return StringUtils.replaceEach(line, searchList, replaceList); + } + + private String mavenName(String serviceId) { + return Stream.of(CodegenNamingUtils.splitOnWordBoundaries(serviceId)) + .map(StringUtils::capitalize) 
+ .collect(Collectors.joining(" ")); + } + } +} diff --git a/release-scripts/src/main/java/software/amazon/awssdk/release/FinalizeNewServiceModuleMain.java b/release-scripts/src/main/java/software/amazon/awssdk/release/FinalizeNewServiceModuleMain.java new file mode 100644 index 000000000000..7ff318bff5fa --- /dev/null +++ b/release-scripts/src/main/java/software/amazon/awssdk/release/FinalizeNewServiceModuleMain.java @@ -0,0 +1,130 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.release; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.apache.commons.cli.CommandLine; +import org.w3c.dom.Document; +import org.w3c.dom.Node; +import software.amazon.awssdk.utils.Validate; + +/** + * A command line application to add new services to the shared pom.xml files. + * + * Example usage: + *
+ * mvn exec:java -pl :release-scripts \
+ *     -Dexec.mainClass="software.amazon.awssdk.release.FinalizeNewServiceModuleMain" \
+ *     -Dexec.args="--maven-project-root /path/to/root
+ *                  --service-module-names service-module-name-1,service-module-name-2"
+ * 
+ */ +public class FinalizeNewServiceModuleMain extends Cli { + private FinalizeNewServiceModuleMain() { + super(requiredOption("service-module-names", + "A comma-separated list containing the name of the service modules to be created."), + requiredOption("maven-project-root", "The root directory for the maven project.")); + } + + public static void main(String[] args) { + new FinalizeNewServiceModuleMain().run(args); + } + + @Override + protected void run(CommandLine commandLine) throws Exception { + new NewServiceCreator(commandLine).run(); + } + + private static class NewServiceCreator { + private final Path mavenProjectRoot; + private final List serviceModuleNames; + + private NewServiceCreator(CommandLine commandLine) { + this.mavenProjectRoot = Paths.get(commandLine.getOptionValue("maven-project-root").trim()); + this.serviceModuleNames = Stream.of(commandLine.getOptionValue("service-module-names").split(",")) + .map(String::trim) + .collect(Collectors.toList()); + + Validate.isTrue(Files.exists(mavenProjectRoot), "Project root does not exist: " + mavenProjectRoot); + } + + public void run() throws Exception { + for (String serviceModuleName : serviceModuleNames) { + Path servicesPomPath = mavenProjectRoot.resolve("services").resolve("pom.xml"); + Path aggregatePomPath = mavenProjectRoot.resolve("aws-sdk-java").resolve("pom.xml"); + Path bomPomPath = mavenProjectRoot.resolve("bom").resolve("pom.xml"); + + new AddSubmoduleTransformer(serviceModuleName).transform(servicesPomPath); + new AddDependencyTransformer(serviceModuleName).transform(aggregatePomPath); + new AddDependencyManagementDependencyTransformer(serviceModuleName).transform(bomPomPath); + } + } + + private static class AddSubmoduleTransformer extends PomTransformer { + private final String serviceModuleName; + + private AddSubmoduleTransformer(String serviceModuleName) { + this.serviceModuleName = serviceModuleName; + } + + @Override + protected void updateDocument(Document doc) { + Node project = findChild(doc, "project"); + Node modules = findChild(project, "modules"); + + modules.appendChild(textElement(doc, "module", serviceModuleName)); + } + } + + private static class AddDependencyTransformer extends PomTransformer { + private final String serviceModuleName; + + private AddDependencyTransformer(String serviceModuleName) { + this.serviceModuleName = serviceModuleName; + } + + @Override + protected void updateDocument(Document doc) { + Node project = findChild(doc, "project"); + Node dependencies = findChild(project, "dependencies"); + + dependencies.appendChild(sdkDependencyElement(doc, serviceModuleName)); + } + } + + private static class AddDependencyManagementDependencyTransformer extends PomTransformer { + private final String serviceModuleName; + + private AddDependencyManagementDependencyTransformer(String serviceModuleName) { + this.serviceModuleName = serviceModuleName; + } + + @Override + protected void updateDocument(Document doc) { + Node project = findChild(doc, "project"); + Node dependencyManagement = findChild(project, "dependencyManagement"); + Node dependencies = findChild(dependencyManagement, "dependencies"); + + dependencies.appendChild(sdkDependencyElement(doc, serviceModuleName)); + } + } + } +} diff --git a/scripts/changelog/model.py b/scripts/changelog/model.py index 722108159c76..0b21fedd415f 100644 --- a/scripts/changelog/model.py +++ b/scripts/changelog/model.py @@ -5,9 +5,10 @@ def __init__(self, version, date, entries): self.entries = entries class ChangelogEntry(object): - def 
__init__(self, type, category, description): + def __init__(self, type, category, description, contributor): self.type = type self.category = category + self.contributor = contributor self.description = description class Version(object): diff --git a/scripts/changelog/util.py b/scripts/changelog/util.py index 998a5f2a1429..acb86d8d8049 100644 --- a/scripts/changelog/util.py +++ b/scripts/changelog/util.py @@ -38,7 +38,8 @@ def parse_release_changes(changes_json): return ReleaseChanges(version, date, entries) def parse_changelog_entry(entry_json): - return ChangelogEntry(entry_json['type'], entry_json['category'], entry_json['description']) + return ChangelogEntry(entry_json['type'], entry_json['category'], entry_json['description'], + entry_json.get('contributor')) def parse_version_string(s): version_parts = [s for s in s.split('.')] diff --git a/scripts/changelog/writer.py b/scripts/changelog/writer.py index 458fb8f02561..ca9e55d0b256 100644 --- a/scripts/changelog/writer.py +++ b/scripts/changelog/writer.py @@ -1,6 +1,7 @@ from changelog.git import stage_file from changelog.util import load_all_released_changes, load_unreleased_changes, version_cmp from functools import cmp_to_key +from operator import attrgetter class ChangelogWriter(object): """ @@ -28,6 +29,19 @@ def write_changes(self, changes): self.write_items_for_category(s, self.bugfixes, "Bugfixes") self.write_items_for_category(s, self.deprecations, "Deprecations") self.write_items_for_category(s, self.removals, "Removals") + self.write_contributors() + + def write_contributors(self): + contributors = set() + for e in self.current_changes.entries: + if e.contributor: + contributors.add(e.contributor) + + if contributors: + self.output_file.write("## __Contributors__\n") + contributors_string = ', '.join(contributors) + self.output_file.write("Special thanks to the following contributors to this release: \n") + self.output_file.write("\n" + contributors_string + "\n") def process_changes(self, changes): self.current_changes = changes @@ -44,7 +58,7 @@ def reset_maps(self): def group_entries(self): for e in self.current_changes.entries: m = self.get_map_for_type(e.type) - m.setdefault(e.category, []).append(e.description) + m.setdefault(e.category, []).append(e) self.categories.add(e.category) def get_sorted_categories(self): @@ -63,7 +77,8 @@ def write_category_header(self, c): self.output_file.write("## __%s__\n" % c) def write_items_for_category(self, category, map, header): - items = sorted(map.get(category, [])) + entries = map.get(category, []) + items = sorted(entries, key=attrgetter('description')) self.write_entries_with_header(header, items) def write_entries_with_header(self, header, entries): @@ -75,7 +90,8 @@ def write_entries_with_header(self, header, entries): self.write('\n') def write_entry(self,e): - entry_lines = e.splitlines(True) + description = e.description + entry_lines = description.splitlines(True) self.write(" - %s" % entry_lines[0]) for l in entry_lines[1:]: if len(l.strip()) == 0: @@ -83,7 +99,10 @@ def write_entry(self,e): else: self.write(" %s" % l) self.write('\n') - + if e.contributor: + self.write(" - ") + self.write("Contributed by: " + e.contributor) + self.write('\n') def get_map_for_type(self, t): if t == 'feature': diff --git a/scripts/new-change b/scripts/new-change index 21d12444947e..bab75aeedbff 100755 --- a/scripts/new-change +++ b/scripts/new-change @@ -61,6 +61,12 @@ type: {change_type} # or "AWS SDK for Java v2" if it's an SDK change to the core, runtime etc category: {category} +# 
Your GitHub user name to be included in the CHANGELOG. +# Every contribution counts and we would like to recognize +# your contribution! +# Leave it empty if you would prefer not to be mentioned. +contributor: {contributor} + The description of the change. Feel free to use Markdown here. description: {description} """ @@ -76,6 +82,7 @@ def new_changelog_entry(args): 'type': args.change_type, 'category': args.category, 'description': args.description, + 'contributor': args.contributor } else: parsed_values = get_values_from_editor(args) @@ -94,7 +101,7 @@ def get_missing_parts(parsed_values): def all_values_provided(args): - return args.change_type and args.category and args.description + return args.change_type and args.category and args.description and args.contributor def get_values_from_editor(args): @@ -103,6 +110,7 @@ def get_values_from_editor(args): change_type=args.change_type, category=args.category, description=args.description, + contributor=args.contributor ) f.write(contents) f.flush() @@ -137,6 +145,11 @@ def write_new_change(parsed_values): os.makedirs(dirname) # Need to generate a unique filename for this change. category = parsed_values['category'] + + contributor = parsed_values['contributor'] + if contributor and contributor.strip: + parsed_values['contributor'] = "@" + contributor + short_summary = ''.join(filter(lambda x: x in VALID_CHARS, category)) contents = json.dumps(parsed_values, indent=4) + "\n" contents_digest = hashlib.sha1(contents.encode('utf-8')).hexdigest() @@ -176,6 +189,8 @@ def parse_filled_in_contents(contents): parsed['type'] = t elif 'category' not in parsed and line.startswith('category:'): parsed['category'] = line[len('category:'):].strip() + elif 'contributor' not in parsed and line.startswith('contributor:'): + parsed['contributor'] = line[len('contributor:'):].strip() elif 'description' not in parsed and line.startswith('description:'): # Assume that everything until the end of the file is part # of the description, so we can break once we pull in the @@ -193,6 +208,8 @@ def main(): default='', choices=('bugfix', 'feature', 'deprecation')) parser.add_argument('-c', '--category', dest='category', default='') + parser.add_argument('-u', '--contributor', dest='contributor', + default='') parser.add_argument('-d', '--description', dest='description', default='') parser.add_argument('-r', '--repo', default='aws/aws-sdk-java-v2', diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index d3b11096e98d..963095cf2c7c 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT dynamodb-enhanced ${awsjavasdk.version} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java index 3e181b78ae3e..1e34cb470a29 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java @@ -59,7 +59,8 @@ public class UpdateItemOperation key -> "#AMZN_MAPPED_" + EnhancedClientUtils.cleanAttributeName(key); private static final Function 
CONDITIONAL_UPDATE_MAPPER = - key -> "if_not_exists(" + key + ", " + EXPRESSION_VALUE_KEY_MAPPER.apply(key) + ")"; + key -> "if_not_exists(" + EXPRESSION_KEY_MAPPER.apply(key) + ", " + + EXPRESSION_VALUE_KEY_MAPPER.apply(key) + ")"; private final UpdateItemEnhancedRequest request; diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionAsyncTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionAsyncTest.java new file mode 100644 index 000000000000..9cb864bc38aa --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionAsyncTest.java @@ -0,0 +1,103 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.CompletionException; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncTable; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedAsyncClient; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnum; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumRecord; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumShortened; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumShortenedRecord; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class FailedConversionAsyncTest extends LocalDynamoDbAsyncTestBase { + private static final TableSchema TABLE_SCHEMA = TableSchema.fromClass(FakeEnumRecord.class); + private static final TableSchema SHORT_TABLE_SCHEMA = + TableSchema.fromClass(FakeEnumShortenedRecord.class); + + private final DynamoDbEnhancedAsyncClient enhancedClient = + DynamoDbEnhancedAsyncClient.builder() + .dynamoDbClient(getDynamoDbAsyncClient()) + .build(); + + private final DynamoDbAsyncTable mappedTable = + enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private final DynamoDbAsyncTable mappedShortTable = + enhancedClient.table(getConcreteTableName("table-name"), SHORT_TABLE_SCHEMA); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())).join(); + } + + @After + public void deleteTable() { + 
getDynamoDbAsyncClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()).join(); + } + + @Test + public void exceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("123"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record).join(); + + assertThatThrownBy(() -> mappedShortTable.getItem(Key.builder().partitionValue("123").build()).join()) + .isInstanceOf(CompletionException.class) + .hasCauseInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("TWO") + .hasMessageContaining("FakeEnumShortened"); + } + + @Test + public void iterableExceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("1"); + record.setEnumAttribute(FakeEnum.ONE); + mappedTable.putItem(record).join(); + record.setId("2"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record).join(); + + List> results = + drainPublisherToError(mappedShortTable.scan(r -> r.limit(1)), 1, IllegalArgumentException.class); + + assertThat(results).hasOnlyOneElementSatisfying( + page -> assertThat(page.items()).hasOnlyOneElementSatisfying(item -> { + assertThat(item.getId()).isEqualTo("1"); + assertThat(item.getEnumAttribute()).isEqualTo(FakeEnumShortened.ONE); + })); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionSyncTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionSyncTest.java new file mode 100644 index 000000000000..db93471f8cba --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/FailedConversionSyncTest.java @@ -0,0 +1,99 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import java.util.Iterator; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnum; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumRecord; +import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeEnumShortenedRecord; +import software.amazon.awssdk.enhanced.dynamodb.model.Page; +import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class FailedConversionSyncTest extends LocalDynamoDbSyncTestBase { + private static final TableSchema TABLE_SCHEMA = TableSchema.fromClass(FakeEnumRecord.class); + private static final TableSchema SHORT_TABLE_SCHEMA = + TableSchema.fromClass(FakeEnumShortenedRecord.class); + + private final DynamoDbEnhancedClient enhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(getDynamoDbClient()) + .build(); + + private final DynamoDbTable mappedTable = + enhancedClient.table(getConcreteTableName("table-name"), TABLE_SCHEMA); + private final DynamoDbTable mappedShortTable = + enhancedClient.table(getConcreteTableName("table-name"), SHORT_TABLE_SCHEMA); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Before + public void createTable() { + mappedTable.createTable(r -> r.provisionedThroughput(getDefaultProvisionedThroughput())); + } + + @After + public void deleteTable() { + getDynamoDbClient().deleteTable(DeleteTableRequest.builder() + .tableName(getConcreteTableName("table-name")) + .build()); + } + + @Test + public void exceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("123"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record); + + assertThatThrownBy(() -> mappedShortTable.getItem(Key.builder().partitionValue("123").build())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("TWO") + .hasMessageContaining("FakeEnumShortened"); + } + + @Test + public void iterableExceptionOnRead() { + FakeEnumRecord record = new FakeEnumRecord(); + record.setId("1"); + record.setEnumAttribute(FakeEnum.ONE); + mappedTable.putItem(record); + record.setId("2"); + record.setEnumAttribute(FakeEnum.TWO); + mappedTable.putItem(record); + + Iterator> results = mappedShortTable.scan(r -> r.limit(1)).iterator(); + + assertThatThrownBy(() -> { + // We can't guarantee the order they will be returned + results.next(); + results.next(); + }).isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("TWO") + .hasMessageContaining("FakeEnumShortened"); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbAsyncTestBase.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbAsyncTestBase.java index 1c3135352520..1d25b1ee8329 100644 --- 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbAsyncTestBase.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/LocalDynamoDbAsyncTestBase.java @@ -16,8 +16,7 @@ package software.amazon.awssdk.enhanced.dynamodb.functionaltests; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; import java.util.List; import software.amazon.awssdk.core.async.SdkPublisher; @@ -41,4 +40,18 @@ public static List drainPublisher(SdkPublisher publisher, int expected return subscriber.bufferedItems(); } + + public static List drainPublisherToError(SdkPublisher publisher, + int expectedNumberOfResults, + Class expectedError) { + BufferingSubscriber subscriber = new BufferingSubscriber<>(); + publisher.subscribe(subscriber); + subscriber.waitForCompletion(1000L); + + assertThat(subscriber.isCompleted(), is(false)); + assertThat(subscriber.bufferedError(), instanceOf(expectedError)); + assertThat(subscriber.bufferedItems().size(), is(expectedNumberOfResults)); + + return subscriber.bufferedItems(); + } } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnum.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnum.java new file mode 100644 index 000000000000..4e4f04efd0fe --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnum.java @@ -0,0 +1,21 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +public enum FakeEnum { + ONE, + TWO +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumRecord.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumRecord.java new file mode 100644 index 000000000000..35d632c78f24 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumRecord.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class FakeEnumRecord { + private String id; + private FakeEnum enumAttribute; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public FakeEnum getEnumAttribute() { + return enumAttribute; + } + + public void setEnumAttribute(FakeEnum enumAttribute) { + this.enumAttribute = enumAttribute; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortened.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortened.java new file mode 100644 index 000000000000..a44b95daa996 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortened.java @@ -0,0 +1,20 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +public enum FakeEnumShortened { + ONE, +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortenedRecord.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortenedRecord.java new file mode 100644 index 000000000000..3a369f515090 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/FakeEnumShortenedRecord.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests.models; + +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; + +@DynamoDbBean +public class FakeEnumShortenedRecord { + private String id; + private FakeEnumShortened enumAttribute; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public FakeEnumShortened getEnumAttribute() { + return enumAttribute; + } + + public void setEnumAttribute(FakeEnumShortened enumAttribute) { + this.enumAttribute = enumAttribute; + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecordWithUpdateBehaviors.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecordWithUpdateBehaviors.java index 8dbcbdad829e..cc6edf4b4a2c 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecordWithUpdateBehaviors.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/models/RecordWithUpdateBehaviors.java @@ -17,6 +17,7 @@ import java.time.Instant; import software.amazon.awssdk.enhanced.dynamodb.extensions.annotations.DynamoDbVersionAttribute; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbAttribute; import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbUpdateBehavior; @@ -41,6 +42,7 @@ public void setId(String id) { } @DynamoDbUpdateBehavior(WRITE_IF_NOT_EXISTS) + @DynamoDbAttribute("created-on") // Forces a test on attribute name cleaning public Instant getCreatedOn() { return createdOn; } diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 97ee4c735df6..d0e71e3094dd 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index 4b81f5ec9e5a..4e201bad37ac 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json index 51f6803af99d..4313bb012bfc 100644 --- a/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json +++ b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,24 @@ "uid":"accessanalyzer-2019-11-01" }, "operations":{ + "ApplyArchiveRule":{ + "name":"ApplyArchiveRule", + "http":{ + "method":"PUT", + "requestUri":"/archive-rule", + "responseCode":200 + }, + "input":{"shape":"ApplyArchiveRuleRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"
Retroactively applies the archive rule to existing findings that meet the archive rule criteria.
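ApplyArchiveRule is a new operation in this model, so a minimal calling sketch may be useful. It assumes the usual generated v2 client surface for this service (an AccessAnalyzerClient exposing applyArchiveRule) and uses only the analyzerArn and ruleName members that the ApplyArchiveRuleRequest shape declares further down; the ARN and rule name are placeholders.

import software.amazon.awssdk.services.accessanalyzer.AccessAnalyzerClient;
import software.amazon.awssdk.services.accessanalyzer.model.ApplyArchiveRuleRequest;

public final class ApplyArchiveRuleSketch {
    public static void main(String[] args) {
        try (AccessAnalyzerClient accessAnalyzer = AccessAnalyzerClient.create()) {
            ApplyArchiveRuleRequest request = ApplyArchiveRuleRequest.builder()
                    // Placeholder values; clientToken is omitted because the model marks it as an idempotency token.
                    .analyzerArn("arn:aws:access-analyzer:us-east-1:123456789012:analyzer/example-analyzer")
                    .ruleName("example-archive-rule")
                    .build();

            // Archives existing findings that match the named rule's criteria.
            accessAnalyzer.applyArchiveRule(request);
        }
    }
}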
", + "idempotent":true + }, "CreateAnalyzer":{ "name":"CreateAnalyzer", "http":{ @@ -49,7 +67,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"
Creates an archive rule for the specified analyzer. Archive rules automatically archive findings that meet the criteria you define when you create the rule.
", + "documentation":"
Creates an archive rule for the specified analyzer. Archive rules automatically archive new findings that meet the criteria you define when you create the rule.
", "idempotent":true }, "DeleteAnalyzer":{ @@ -140,7 +158,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"
Retrieves information about an archive rule.
" + "documentation":"
Retrieves information about an archive rule.
To learn about filter keys that you can use to create an archive rule, see Access Analyzer filter keys in the IAM User Guide.
" }, "GetFinding":{ "name":"GetFinding", @@ -228,7 +246,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"
Retrieves a list of findings generated by the specified analyzer.
" + "documentation":"
Retrieves a list of findings generated by the specified analyzer.
To learn about filter keys that you can use to create an archive rule, see Access Analyzer filter keys in the IAM User Guide.
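For the ListFindings documentation above, a similarly hedged sketch (again assuming the standard generated client, a ListFindingsRequest with an analyzerArn member, and a findings() list of summaries on the response; the ARN is a placeholder):

import software.amazon.awssdk.services.accessanalyzer.AccessAnalyzerClient;
import software.amazon.awssdk.services.accessanalyzer.model.ListFindingsRequest;
import software.amazon.awssdk.services.accessanalyzer.model.ListFindingsResponse;

public final class ListFindingsSketch {
    public static void main(String[] args) {
        try (AccessAnalyzerClient accessAnalyzer = AccessAnalyzerClient.create()) {
            ListFindingsRequest request = ListFindingsRequest.builder()
                    .analyzerArn("arn:aws:access-analyzer:us-east-1:123456789012:analyzer/example-analyzer")
                    .build();

            // Prints each finding's id and status; accessor names assume default codegen naming.
            ListFindingsResponse response = accessAnalyzer.listFindings(request);
            response.findings().forEach(finding ->
                    System.out.println(finding.id() + " -> " + finding.status()));
        }
    }
}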
" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -510,6 +528,29 @@ "type":"list", "member":{"shape":"AnalyzerSummary"} }, + "ApplyArchiveRuleRequest":{ + "type":"structure", + "required":[ + "analyzerArn", + "ruleName" + ], + "members":{ + "analyzerArn":{ + "shape":"AnalyzerArn", + "documentation":"
The Amazon resource name (ARN) of the analyzer.
" + }, + "clientToken":{ + "shape":"String", + "documentation":"
A client token.
", + "idempotencyToken":true + }, + "ruleName":{ + "shape":"Name", + "documentation":"
The name of the rule to apply.
" + } + }, + "documentation":"
Retroactively applies an archive rule.
" + }, "ArchiveRuleSummary":{ "type":"structure", "required":[ @@ -832,8 +873,8 @@ "FindingSourceType":{ "type":"string", "enum":[ - "BUCKET_ACL", "POLICY", + "BUCKET_ACL", "S3_ACCESS_POINT" ] }, @@ -1087,7 +1128,8 @@ "documentation":"
Internal server error.
", "error":{"httpStatusCode":500}, "exception":true, - "fault":true + "fault":true, + "retryable":{"throttling":false} }, "ListAnalyzedResourcesRequest":{ "type":"structure", @@ -1330,12 +1372,12 @@ "ResourceType":{ "type":"string", "enum":[ + "AWS::S3::Bucket", "AWS::IAM::Role", - "AWS::KMS::Key", + "AWS::SQS::Queue", "AWS::Lambda::Function", "AWS::Lambda::LayerVersion", - "AWS::S3::Bucket", - "AWS::SQS::Queue" + "AWS::KMS::Key" ] }, "ServiceQuotaExceededException":{ @@ -1463,7 +1505,8 @@ "httpStatusCode":429, "senderFault":true }, - "exception":true + "exception":true, + "retryable":{"throttling":true} }, "Timestamp":{ "type":"timestamp", @@ -1617,10 +1660,10 @@ "ValidationExceptionReason":{ "type":"string", "enum":[ + "unknownOperation", "cannotParse", "fieldValidationFailed", - "other", - "unknownOperation" + "other" ] }, "ValueList":{ @@ -1630,5 +1673,5 @@ "min":1 } }, - "documentation":"

AWS IAM Access Analyzer helps identify potential resource-access risks by enabling you to identify any policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your AWS environment. An external principal can be another AWS account, a root user, an IAM user or role, a federated user, an AWS service, or an anonymous user. This guide describes the AWS IAM Access Analyzer operations that you can call programmatically. For general information about Access Analyzer, see the AWS IAM Access Analyzer section of the IAM User Guide.

To start using Access Analyzer, you first need to create an analyzer.

" + "documentation":"

AWS IAM Access Analyzer helps identify potential resource-access risks by enabling you to identify any policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your AWS environment. An external principal can be another AWS account, a root user, an IAM user or role, a federated user, an AWS service, or an anonymous user. This guide describes the AWS IAM Access Analyzer operations that you can call programmatically. For general information about Access Analyzer, see AWS IAM Access Analyzer in the IAM User Guide.

To start using Access Analyzer, you first need to create an analyzer.

" } diff --git a/services/acm/pom.xml b/services/acm/pom.xml index bb19f12a9212..5c5ef272d6bc 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 2815221e2f8f..c3e5b24012f3 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index 9d40e7a00fcb..aa1fd6de5dfb 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 706b834c2512..bd614ac8ed39 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplify/src/main/resources/codegen-resources/service-2.json b/services/amplify/src/main/resources/codegen-resources/service-2.json index 699b0b33e928..a3e337323558 100644 --- a/services/amplify/src/main/resources/codegen-resources/service-2.json +++ b/services/amplify/src/main/resources/codegen-resources/service-2.json @@ -726,6 +726,10 @@ "shape":"BuildSpec", "documentation":"

Describes the content of the build specification (build spec) for the Amplify app.

" }, + "customHeaders":{ + "shape":"CustomHeaders", + "documentation":"

Describes the custom HTTP headers for the Amplify app.

" + }, "enableAutoBranchCreation":{ "shape":"EnableAutoBranchCreation", "documentation":"

Enables automated branch creation for the Amplify app.

" @@ -747,8 +751,9 @@ }, "AppId":{ "type":"string", - "max":255, - "min":1 + "max":20, + "min":1, + "pattern":"d[a-z0-9]+" }, "Apps":{ "type":"list", @@ -830,7 +835,7 @@ }, "enablePerformanceMode":{ "shape":"EnablePerformanceMode", - "documentation":"

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. Enabling performance mode will mean that hosting configuration or code changes can take up to 10 minutes to roll out.

" + "documentation":"

Enables performance mode for the branch.

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

" }, "buildSpec":{ "shape":"BuildSpec", @@ -838,7 +843,7 @@ }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables pull request preview for the autocreated branch.

" + "documentation":"

Enables pull request previews for the autocreated branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", @@ -1017,7 +1022,7 @@ }, "enablePerformanceMode":{ "shape":"EnablePerformanceMode", - "documentation":"

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. Enabling performance mode will mean that hosting configuration or code changes can take up to 10 minutes to roll out.

" + "documentation":"

Enables performance mode for the branch.

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

" }, "thumbnailUrl":{ "shape":"ThumbnailUrl", @@ -1041,7 +1046,7 @@ }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables pull request preview for the branch.

" + "documentation":"

Enables pull request previews for the branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", @@ -1108,7 +1113,7 @@ "members":{ "name":{ "shape":"Name", - "documentation":"

The name for the Amplify app.

" + "documentation":"

The name for an Amplify app.

" }, "description":{ "shape":"Description", @@ -1166,17 +1171,21 @@ "shape":"BuildSpec", "documentation":"

The build specification (build spec) for an Amplify app.

" }, + "customHeaders":{ + "shape":"CustomHeaders", + "documentation":"

The custom HTTP headers for an Amplify app.

" + }, "enableAutoBranchCreation":{ "shape":"EnableAutoBranchCreation", - "documentation":"

Enables automated branch creation for the Amplify app.

" + "documentation":"

Enables automated branch creation for an Amplify app.

" }, "autoBranchCreationPatterns":{ "shape":"AutoBranchCreationPatterns", - "documentation":"

The automated branch creation glob patterns for the Amplify app.

" + "documentation":"

The automated branch creation glob patterns for an Amplify app.

" }, "autoBranchCreationConfig":{ "shape":"AutoBranchCreationConfig", - "documentation":"

The automated branch creation configuration for the Amplify app.

" + "documentation":"

The automated branch creation configuration for an Amplify app.

" } }, "documentation":"

The request structure used to create apps in Amplify.

" @@ -1278,7 +1287,7 @@ }, "enablePerformanceMode":{ "shape":"EnablePerformanceMode", - "documentation":"

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. Enabling performance mode will mean that hosting configuration or code changes can take up to 10 minutes to roll out.

" + "documentation":"

Enables performance mode for the branch.

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

" }, "tags":{ "shape":"TagMap", @@ -1298,7 +1307,7 @@ }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables pull request preview for this branch.

" + "documentation":"

Enables pull request previews for this branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", @@ -1463,6 +1472,11 @@ "member":{"shape":"CustomDomain"}, "max":255 }, + "CustomHeaders":{ + "type":"string", + "max":25000, + "min":1 + }, "CustomRule":{ "type":"structure", "required":[ @@ -2541,7 +2555,7 @@ "MaxResults":{ "type":"integer", "max":100, - "min":1 + "min":0 }, "Name":{ "type":"string", @@ -3086,17 +3100,21 @@ "shape":"BuildSpec", "documentation":"

The build specification (build spec) for an Amplify app.

" }, + "customHeaders":{ + "shape":"CustomHeaders", + "documentation":"

The custom HTTP headers for an Amplify app.

" + }, "enableAutoBranchCreation":{ "shape":"EnableAutoBranchCreation", - "documentation":"

Enables automated branch creation for the Amplify app.

" + "documentation":"

Enables automated branch creation for an Amplify app.

" }, "autoBranchCreationPatterns":{ "shape":"AutoBranchCreationPatterns", - "documentation":"

Describes the automated branch creation glob patterns for the Amplify app.

" + "documentation":"

Describes the automated branch creation glob patterns for an Amplify app.

" }, "autoBranchCreationConfig":{ "shape":"AutoBranchCreationConfig", - "documentation":"

The automated branch creation configuration for the Amplify app.

" + "documentation":"

The automated branch creation configuration for an Amplify app.

" }, "repository":{ "shape":"Repository", @@ -3177,7 +3195,7 @@ }, "enablePerformanceMode":{ "shape":"EnablePerformanceMode", - "documentation":"

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. Enabling performance mode will mean that hosting configuration or code changes can take up to 10 minutes to roll out.

" + "documentation":"

Enables performance mode for the branch.

Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.

" }, "buildSpec":{ "shape":"BuildSpec", @@ -3193,7 +3211,7 @@ }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables pull request preview for this branch.

" + "documentation":"

Enables pull request previews for this branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml new file mode 100644 index 000000000000..bbf2cabf280c --- /dev/null +++ b/services/amplifybackend/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + amplifybackend + AWS Java SDK :: Services :: Amplify Backend + The AWS Java SDK for Amplify Backend module holds the client classes that are used for + communicating with Amplify Backend. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.amplifybackend + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/amplifybackend/src/main/resources/codegen-resources/paginators-1.json b/services/amplifybackend/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..40304c7d438f --- /dev/null +++ b/services/amplifybackend/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListBackendJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Jobs" + } + } +} diff --git a/services/amplifybackend/src/main/resources/codegen-resources/service-2.json b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..9c9d6ff4cab1 --- /dev/null +++ b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3946 @@ +{ + "metadata": { + "apiVersion": "2020-08-11", + "endpointPrefix": "amplifybackend", + "signingName": "amplifybackend", + "serviceFullName": "AmplifyBackend", + "serviceId": "AmplifyBackend", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "amplifybackend-2020-08-11", + "signatureVersion": "v4" + }, + "operations": { + "CloneBackend": { + "name": "CloneBackend", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/environments/{backendEnvironmentName}/clone", + "responseCode": 200 + }, + "input": { + "shape": "CloneBackendRequest" + }, + "output": { + "shape": "CloneBackendResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

This operation clones an existing backend.

" + }, + "CreateBackend": { + "name": "CreateBackend", + "http": { + "method": "POST", + "requestUri": "/backend", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendRequest" + }, + "output": { + "shape": "CreateBackendResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

This operation creates a backend for an Amplify app. Backends are automatically created at the time of app creation.

" + }, + "CreateBackendAPI": { + "name": "CreateBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendAPIRequest" + }, + "output": { + "shape": "CreateBackendAPIResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Creates a new backend API resource.

" + }, + "CreateBackendAuth": { + "name": "CreateBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendAuthRequest" + }, + "output": { + "shape": "CreateBackendAuthResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Creates a new backend authentication resource.

" + }, + "CreateBackendConfig": { + "name": "CreateBackendConfig", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/config", + "responseCode": 200 + }, + "input": { + "shape": "CreateBackendConfigRequest" + }, + "output": { + "shape": "CreateBackendConfigResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Creates a config object for a backend.

" + }, + "CreateToken": { + "name": "CreateToken", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/challenge", + "responseCode": 200 + }, + "input": { + "shape": "CreateTokenRequest" + }, + "output": { + "shape": "CreateTokenResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Generates a one-time challenge code to authenticate a user into your Amplify Admin UI.

" + }, + "DeleteBackend": { + "name": "DeleteBackend", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/environments/{backendEnvironmentName}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteBackendRequest" + }, + "output": { + "shape": "DeleteBackendResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Removes an existing environment from your Amplify project.

" + }, + "DeleteBackendAPI": { + "name": "DeleteBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteBackendAPIRequest" + }, + "output": { + "shape": "DeleteBackendAPIResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Deletes an existing backend API resource.

" + }, + "DeleteBackendAuth": { + "name": "DeleteBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth/{backendEnvironmentName}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteBackendAuthRequest" + }, + "output": { + "shape": "DeleteBackendAuthResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Deletes an existing backend authentication resource.

" + }, + "DeleteToken": { + "name": "DeleteToken", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/challenge/{sessionId}/remove", + "responseCode": 200 + }, + "input": { + "shape": "DeleteTokenRequest" + }, + "output": { + "shape": "DeleteTokenResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Deletes the challenge token based on the given appId and sessionId.

" + }, + "GenerateBackendAPIModels": { + "name": "GenerateBackendAPIModels", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/generateModels", + "responseCode": 200 + }, + "input": { + "shape": "GenerateBackendAPIModelsRequest" + }, + "output": { + "shape": "GenerateBackendAPIModelsResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Generates a model schema for an existing backend API resource.

" + }, + "GetBackend": { + "name": "GetBackend", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/details", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendRequest" + }, + "output": { + "shape": "GetBackendResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Provides project-level details for your Amplify UI project.

" + }, + "GetBackendAPI": { + "name": "GetBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/details", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendAPIRequest" + }, + "output": { + "shape": "GetBackendAPIResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Gets the details for a backend API.

" + }, + "GetBackendAPIModels": { + "name": "GetBackendAPIModels", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}/getModels", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendAPIModelsRequest" + }, + "output": { + "shape": "GetBackendAPIModelsResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Generates a model schema for an existing backend API resource.

" + }, + "GetBackendAuth": { + "name": "GetBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth/{backendEnvironmentName}/details", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendAuthRequest" + }, + "output": { + "shape": "GetBackendAuthResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Gets backend auth details.

" + }, + "GetBackendJob": { + "name": "GetBackendJob", + "http": { + "method": "GET", + "requestUri": "/backend/{appId}/job/{backendEnvironmentName}/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "GetBackendJobRequest" + }, + "output": { + "shape": "GetBackendJobResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Returns information about a specific job.

" + }, + "GetToken": { + "name": "GetToken", + "http": { + "method": "GET", + "requestUri": "/backend/{appId}/challenge/{sessionId}", + "responseCode": 200 + }, + "input": { + "shape": "GetTokenRequest" + }, + "output": { + "shape": "GetTokenResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Gets the challenge token based on the given appId and sessionId.

" + }, + "ListBackendJobs": { + "name": "ListBackendJobs", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/job/{backendEnvironmentName}", + "responseCode": 200 + }, + "input": { + "shape": "ListBackendJobsRequest" + }, + "output": { + "shape": "ListBackendJobsResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Lists the jobs for the backend of an Amplify app.

" + }, + "RemoveAllBackends": { + "name": "RemoveAllBackends", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/remove", + "responseCode": 200 + }, + "input": { + "shape": "RemoveAllBackendsRequest" + }, + "output": { + "shape": "RemoveAllBackendsResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Removes all backend environments from your Amplify project.

" + }, + "RemoveBackendConfig": { + "name": "RemoveBackendConfig", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/config/remove", + "responseCode": 200 + }, + "input": { + "shape": "RemoveBackendConfigRequest" + }, + "output": { + "shape": "RemoveBackendConfigResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Removes the AWS resources that are required to access the Amplify Admin UI.

" + }, + "UpdateBackendAPI": { + "name": "UpdateBackendAPI", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/api/{backendEnvironmentName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendAPIRequest" + }, + "output": { + "shape": "UpdateBackendAPIResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Updates an existing backend API resource.

" + }, + "UpdateBackendAuth": { + "name": "UpdateBackendAuth", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/auth/{backendEnvironmentName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendAuthRequest" + }, + "output": { + "shape": "UpdateBackendAuthResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Updates an existing backend authentication resource.

" + }, + "UpdateBackendConfig": { + "name": "UpdateBackendConfig", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/config/update", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendConfigRequest" + }, + "output": { + "shape": "UpdateBackendConfigResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Updates the AWS resources that are required to access the Amplify Admin UI.

" + }, + "UpdateBackendJob": { + "name": "UpdateBackendJob", + "http": { + "method": "POST", + "requestUri": "/backend/{appId}/job/{backendEnvironmentName}/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateBackendJobRequest" + }, + "output": { + "shape": "UpdateBackendJobResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "GatewayTimeoutException", + "documentation": "

504 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" + } + ], + "documentation": "

Updates a specific job.

" + } + }, + "shapes": { + "AuthResources": { + "type": "string", + "enum": [ + "USER_POOL_ONLY", + "IDENTITY_POOL_AND_USER_POOL" + ] + }, + "BackendAPIAppSyncAuthSettings": { + "type": "structure", + "members": { + "CognitoUserPoolId": { + "shape": "__string", + "locationName": "cognitoUserPoolId", + "documentation": "

The Amazon Cognito user pool ID, if Amazon Cognito is used as an authentication setting to access your data models.

" + }, + "Description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The API key description for API_KEY, if it is used as an authentication mechanism to access your data models.

" + }, + "ExpirationTime": { + "shape": "__double", + "locationName": "expirationTime", + "documentation": "

The API key expiration time for API_KEY, if it is used as an authentication mechanism to access your data models.

" + }, + "OpenIDAuthTTL": { + "shape": "__string", + "locationName": "openIDAuthTTL", + "documentation": "

The expiry time for the OpenID authentication mechanism.

" + }, + "OpenIDClientId": { + "shape": "__string", + "locationName": "openIDClientId", + "documentation": "

The clientID for openID, if openID is used as an authentication setting to access your data models.

" + }, + "OpenIDIatTTL": { + "shape": "__string", + "locationName": "openIDIatTTL", + "documentation": "

The expiry time for the OpenID authentication mechanism.

" + }, + "OpenIDIssueURL": { + "shape": "__string", + "locationName": "openIDIssueURL", + "documentation": "

The openID issuer URL, if openID is used as an authentication setting to access your data models.

" + }, + "OpenIDProviderName": { + "shape": "__string", + "locationName": "openIDProviderName", + "documentation": "

The openID provider name, if openID is used as an authentication mechanism to access your data models.

" + } + }, + "documentation": "

The authentication settings for accessing provisioned data models in your Amplify project.

" + }, + "BackendAPIAuthType": { + "type": "structure", + "members": { + "Mode": { + "shape": "Mode", + "locationName": "mode", + "documentation": "

Describes the authentication mode.

" + }, + "Settings": { + "shape": "BackendAPIAppSyncAuthSettings", + "locationName": "settings", + "documentation": "

Describes settings for the authentication mode.

" + } + }, + "documentation": "

Describes the auth types for your configured data models.

" + }, + "BackendAPICodegenReqObj": { + "type": "structure", + "members": { + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "ResourceName" + ] + }, + "BackendAPICodegenRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object sent when a backend is created.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BackendAPIConflictResolution": { + "type": "structure", + "members": { + "ResolutionStrategy": { + "shape": "ResolutionStrategy", + "locationName": "resolutionStrategy", + "documentation": "

The strategy for conflict resolution.

" + } + }, + "documentation": "

Describes the conflict resolution configuration for the data model configured in your Amplify project.

" + }, + "BackendAPIReqObj": { + "type": "structure", + "members": { + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

Defines the resource configuration for the data model in your Amplify project.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "ResourceName" + ] + }, + "BackendAPIResourceConfig": { + "type": "structure", + "members": { + "AdditionalAuthTypes": { + "shape": "ListOfBackendAPIAuthType", + "locationName": "additionalAuthTypes", + "documentation": "

Additional authentication methods used to interact with your data models.

" + }, + "ApiName": { + "shape": "__string", + "locationName": "apiName", + "documentation": "

The API name used to interact with the data model, configured as a part of the Amplify project.

" + }, + "ConflictResolution": { + "shape": "BackendAPIConflictResolution", + "locationName": "conflictResolution", + "documentation": "

The conflict resolution strategy for your data stored in the data models.

" + }, + "DefaultAuthType": { + "shape": "BackendAPIAuthType", + "locationName": "defaultAuthType", + "documentation": "

The default authentication type for interacting with the configured data models in your Amplify project.

" + }, + "Service": { + "shape": "__string", + "locationName": "service", + "documentation": "

The service used to provision and interact with the data model.

" + }, + "TransformSchema": { + "shape": "__string", + "locationName": "transformSchema", + "documentation": "

The definition of the data model in the annotated transform of the GraphQL schema.

" + } + }, + "documentation": "

The resource configuration for the data model, configured as a part of the Amplify project.

" + }, + "BackendAPIRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object sent when a backend is created.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BackendAuthRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BackendAuthSocialProviderConfig": { + "type": "structure", + "members": { + "ClientId": { + "shape": "__string", + "locationName": "client_id", + "documentation": "

Describes the client_id that can be obtained from the third-party social federation provider.

" + }, + "ClientSecret": { + "shape": "__string", + "locationName": "client_secret", + "documentation": "

Describes the client_secret that can be obtained from third-party social federation providers.

" + } + }, + "documentation": "

Describes third-party social federation configurations for allowing your app users to sign in using OAuth.

" + }, + "BackendConfigRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

The app ID for the backend manager.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

Describes the Amazon Cognito configurations for the Admin UI auth resource to log in with.

" + } + }, + "documentation": "

The response object for this operation.

" + }, + "BackendJobReqObj": { + "type": "structure", + "members": { + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

Filters the list of response objects to include only those with the specified operation name.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

Filters the list of response objects to include only those with the specified status.

" + } + }, + "documentation": "

The request object for this operation.

" + }, + "BackendJobRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "CreateTime": { + "shape": "__string", + "locationName": "createTime", + "documentation": "

The time when the job was created.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + }, + "UpdateTime": { + "shape": "__string", + "locationName": "updateTime", + "documentation": "

The time when the job was last updated.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "BadRequestException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

An error message indicating that the request failed.

" + } + }, + "documentation": "

An error returned if a request is not formed properly.

", + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "CloneBackendReqObj": { + "type": "structure", + "members": { + "TargetEnvironmentName": { + "shape": "__string", + "locationName": "targetEnvironmentName", + "documentation": "

The name of the destination backend environment to be created.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "TargetEnvironmentName" + ] + }, + "CloneBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "TargetEnvironmentName": { + "shape": "__string", + "locationName": "targetEnvironmentName", + "documentation": "

The name of the destination backend environment to be created.

" + } + }, + "documentation": "

The request body for CloneBackend.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "TargetEnvironmentName" + ] + }, + "CloneBackendRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object sent when a backend is created.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "CloneBackendResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "CreateBackendAPIReqObj": { + "type": "structure", + "members": { + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this request.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this request.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for CreateBackendAPI.

", + "required": [ + "AppId", + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "CreateBackendAuthForgotPasswordConfig": { + "type": "structure", + "members": { + "DeliveryMethod": { + "shape": "DeliveryMethod", + "locationName": "deliveryMethod", + "documentation": "

Describes which method to use (either SMS or email) to deliver messages to app users that want to recover their password.

" + }, + "EmailSettings": { + "shape": "EmailSettings", + "locationName": "emailSettings", + "documentation": "

The configuration for the email sent when an app user forgets their password.

" + }, + "SmsSettings": { + "shape": "SmsSettings", + "locationName": "smsSettings", + "documentation": "

The configuration for the SMS message sent when an app user forgets their password.

" + } + }, + "documentation": "

Describes the forgot password policy for authenticating into the Amplify app.

", + "required": [ + "DeliveryMethod" + ] + }, + "CreateBackendAuthIdentityPoolConfig": { + "type": "structure", + "members": { + "IdentityPoolName": { + "shape": "__string", + "locationName": "identityPoolName", + "documentation": "

Name of the identity pool used for authorization.

" + }, + "UnauthenticatedLogin": { + "shape": "__boolean", + "locationName": "unauthenticatedLogin", + "documentation": "

Set to true or false based on whether you want to enable guest authorization to your Amplify app.

" + } + }, + "documentation": "

Describes authorization configurations for the auth resources, configured as a part of your Amplify project.

", + "required": [ + "UnauthenticatedLogin", + "IdentityPoolName" + ] + }, + "CreateBackendAuthMFAConfig": { + "type": "structure", + "members": { + "MFAMode": { + "shape": "MFAMode", + "documentation": "

Describes whether MFA should be [ON, OFF, or OPTIONAL] for authentication in your Amplify project.

" + }, + "Settings": { + "shape": "Settings", + "locationName": "settings", + "documentation": "

Describes the configuration settings and methods for your Amplify app users to use MFA.

" + } + }, + "documentation": "

Describes whether to apply multi-factor authentication (MFA) policies for your Amazon Cognito user pool that's configured as a part of your Amplify project.

", + "required": [ + "MFAMode" + ] + }, + "CreateBackendAuthOAuthConfig": { + "type": "structure", + "members": { + "DomainPrefix": { + "shape": "__string", + "locationName": "domainPrefix", + "documentation": "

The domain prefix for your Amplify app.

" + }, + "OAuthGrantType": { + "shape": "OAuthGrantType", + "locationName": "oAuthGrantType", + "documentation": "

The OAuth grant type that you use to allow app users to authenticate from your Amplify app.

" + }, + "OAuthScopes": { + "shape": "ListOfOAuthScopesElement", + "locationName": "oAuthScopes", + "documentation": "

List of OAuth-related flows that allow your app users to authenticate from your Amplify app.

" + }, + "RedirectSignInURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignInURIs", + "documentation": "

The redirected URI for signing in to your Amplify app.

" + }, + "RedirectSignOutURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignOutURIs", + "documentation": "

Redirect URLs that OAuth uses when a user signs out of an Amplify app.

" + }, + "SocialProviderSettings": { + "shape": "SocialProviderSettings", + "locationName": "socialProviderSettings", + "documentation": "

The settings for using social identity providers for access to your Amplify app.

" + } + }, + "documentation": "

Creates the OAuth configuration for your Amplify project.

", + "required": [ + "RedirectSignOutURIs", + "RedirectSignInURIs", + "OAuthGrantType", + "OAuthScopes" + ] + }, + "CreateBackendAuthPasswordPolicyConfig": { + "type": "structure", + "members": { + "AdditionalConstraints": { + "shape": "ListOfAdditionalConstraintsElement", + "locationName": "additionalConstraints", + "documentation": "

Additional constraints for the password used to access the backend of your Amplify project.

" + }, + "MinimumLength": { + "shape": "__double", + "locationName": "minimumLength", + "documentation": "

The minimum length of the password used to access the backend of your Amplify project.

" + } + }, + "documentation": "

The password policy configuration for the backend of your Amplify project.

", + "required": [ + "MinimumLength" + ] + }, + "CreateBackendAuthReqObj": { + "type": "structure", + "members": { + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this request object.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this request object.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for CreateBackendAuth.

", + "required": [ + "AppId", + "ResourceName", + "BackendEnvironmentName", + "ResourceConfig" + ] + }, + "CreateBackendAuthResourceConfig": { + "type": "structure", + "members": { + "AuthResources": { + "shape": "AuthResources", + "locationName": "authResources", + "documentation": "

Defines whether you want to configure only authentication or both authentication and authorization settings.

" + }, + "IdentityPoolConfigs": { + "shape": "CreateBackendAuthIdentityPoolConfig", + "locationName": "identityPoolConfigs", + "documentation": "

Describes the authorization configuration for the Amazon Cognito identity pool, provisioned as a part of the auth resource in your Amplify project.

" + }, + "Service": { + "shape": "Service", + "locationName": "service", + "documentation": "

Defines the service name to use when configuring an authentication resource in your Amplify project.

" + }, + "UserPoolConfigs": { + "shape": "CreateBackendAuthUserPoolConfig", + "locationName": "userPoolConfigs", + "documentation": "

Describes the authentication configuration for the Amazon Cognito user pool, provisioned as a part of the auth resource in your Amplify project.

" + } + }, + "documentation": "

Defines the resource configuration when creating an auth resource in your Amplify project.

", + "required": [ + "AuthResources", + "UserPoolConfigs", + "Service" + ] + }, + "CreateBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "CreateBackendAuthUserPoolConfig": { + "type": "structure", + "members": { + "ForgotPassword": { + "shape": "CreateBackendAuthForgotPasswordConfig", + "locationName": "forgotPassword", + "documentation": "

Describes the forgotten password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

" + }, + "Mfa": { + "shape": "CreateBackendAuthMFAConfig", + "locationName": "mfa", + "documentation": "

Describes whether to apply multi-factor authentication (MFA) policies for your Amazon Cognito user pool that's configured as a part of your Amplify project.

" + }, + "OAuth": { + "shape": "CreateBackendAuthOAuthConfig", + "locationName": "oAuth", + "documentation": "

Describes the OAuth policy and rules for your Amazon Cognito user pool, configured as a part of your Amplify project.

" + }, + "PasswordPolicy": { + "shape": "CreateBackendAuthPasswordPolicyConfig", + "locationName": "passwordPolicy", + "documentation": "

Describes the password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

" + }, + "RequiredSignUpAttributes": { + "shape": "ListOfRequiredSignUpAttributesElement", + "locationName": "requiredSignUpAttributes", + "documentation": "

The required attributes to sign up new users in the Amazon Cognito user pool.

" + }, + "SignInMethod": { + "shape": "SignInMethod", + "locationName": "signInMethod", + "documentation": "

Describes the sign-in methods that your Amplify app users use to log in to the Amazon Cognito user pool that's configured as a part of your Amplify project.

" + }, + "UserPoolName": { + "shape": "__string", + "locationName": "userPoolName", + "documentation": "

The Amazon Cognito user pool name.

" + } + }, + "documentation": "

Describes the Amazon Cognito user pool configuration for the auth resource to be configured for your Amplify project.

", + "required": [ + "RequiredSignUpAttributes", + "SignInMethod", + "UserPoolName" + ] + }, + "CreateBackendConfigReqObj": { + "type": "structure", + "members": { + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

The app ID for the backend manager.

" + } + }, + "documentation": "

The request object for this operation.

" + }, + "CreateBackendConfigRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

The app ID for the backend manager.

" + } + }, + "documentation": "

The request body for CreateBackendConfig.

", + "required": [ + "AppId" + ] + }, + "CreateBackendConfigRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId" + ] + }, + "CreateBackendConfigResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "CreateBackendReqObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

The name of the app.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "ResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for the backend creation request.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of the resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "AppName" + ] + }, + "CreateBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

The name of the app.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "ResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for the backend creation request.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of the resource.

" + } + }, + "documentation": "

The request body for CreateBackend.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "AppName" + ] + }, + "CreateBackendRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object sent when a backend is created.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "CreateBackendResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "CreateTokenRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + } + }, + "required": [ + "AppId" + ] + }, + "CreateTokenRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

One-time challenge code for authenticating into the Amplify Admin UI.

" + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

A unique ID provided when creating a new challenge token.

" + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

The expiry time for the one-time generated token code.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId", + "Ttl", + "SessionId", + "ChallengeCode" + ] + }, + "CreateTokenResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

One-time challenge code for authenticating into the Amplify Admin UI.

" + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

A unique ID provided when creating a new challenge token.

" + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

The expiry time for the one-time generated token code.

" + } + } + }, + "DeleteBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

Defines the resource configuration for the data model in your Amplify project.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for DeleteBackendAPI.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "DeleteBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "DeleteBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for DeleteBackendAuth.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "DeleteBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "DeleteBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + } + }, + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "DeleteBackendRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The returned object for a request to delete a backend.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "DeleteBackendResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "DeleteTokenRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "SessionId": { + "shape": "__string", + "location": "uri", + "locationName": "sessionId", + "documentation": "

The session ID.

" + } + }, + "required": [ + "SessionId", + "AppId" + ] + }, + "DeleteTokenRespObj": { + "type": "structure", + "members": { + "IsSuccess": { + "shape": "__boolean", + "locationName": "isSuccess", + "documentation": "

Indicates whether the request succeeded or failed.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "IsSuccess" + ] + }, + "DeleteTokenResponse": { + "type": "structure", + "members": { + "IsSuccess": { + "shape": "__boolean", + "locationName": "isSuccess", + "documentation": "

Indicates whether the request succeeded or failed.

" + } + } + }, + "DeliveryMethod": { + "type": "string", + "enum": [ + "EMAIL", + "SMS" + ] + }, + "EmailSettings": { + "type": "structure", + "members": { + "EmailMessage": { + "shape": "__string", + "locationName": "emailMessage", + "documentation": "

The body of the email.

" + }, + "EmailSubject": { + "shape": "__string", + "locationName": "emailSubject", + "documentation": "

The subject of the email.

" + } + }, + "documentation": "

The configuration for the email sent when an app user forgets their password.

" + }, + "GatewayTimeoutException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

An error message indicating that the request failed.

" + } + }, + "documentation": "

An error returned if there's a temporary issue with the service.

", + "exception": true, + "error": { + "httpStatusCode": 504 + } + }, + "GenerateBackendAPIModelsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for GenerateBackendAPIModels.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GenerateBackendAPIModelsResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "GetBackendAPICodegenRespObj": { + "type": "structure", + "members": { + "Models": { + "shape": "__string", + "locationName": "models", + "documentation": "

Stringified JSON of the DataStore model.

" + }, + "Status": { + "shape": "Status", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object for this operation.

" + }, + "GetBackendAPIModelsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for GetBackendAPIModels.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GetBackendAPIModelsResponse": { + "type": "structure", + "members": { + "Models": { + "shape": "__string", + "locationName": "models", + "documentation": "

Stringified JSON of the DataStore model.

" + }, + "Status": { + "shape": "Status", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "GetBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

Defines the resource configuration for the data model in your Amplify project.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for GetBackendAPI.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GetBackendAPIRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this response object.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "GetBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this response object.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + } + }, + "GetBackendAuthReqObj": { + "type": "structure", + "members": { + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "ResourceName" + ] + }, + "GetBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for GetBackendAuth.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "GetBackendAuthRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for authorization requests to the backend of your Amplify project.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "GetBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "ResourceConfig": { + "shape": "CreateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for authorization requests to the backend of your Amplify project.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + } + }, + "GetBackendJobRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + } + }, + "required": [ + "AppId", + "BackendEnvironmentName", + "JobId" + ] + }, + "GetBackendJobResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "CreateTime": { + "shape": "__string", + "locationName": "createTime", + "documentation": "

The time when the job was created.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + }, + "UpdateTime": { + "shape": "__string", + "locationName": "updateTime", + "documentation": "

The time when the job was last updated.

" + } + } + }, + "GetBackendReqObj": { + "type": "structure", + "members": { + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + } + }, + "documentation": "

The request object for this operation.

" + }, + "GetBackendRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + } + }, + "documentation": "

The request body for GetBackend.

", + "required": [ + "AppId" + ] + }, + "GetBackendRespObj": { + "type": "structure", + "members": { + "AmplifyMetaConfig": { + "shape": "__string", + "locationName": "amplifyMetaConfig", + "documentation": "

A stringified version of the current configurations for your Amplify project.

" + }, + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

The name of the app.

" + }, + "BackendEnvironmentList": { + "shape": "ListOf__string", + "locationName": "backendEnvironmentList", + "documentation": "

A list of backend environments.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId" + ] + }, + "GetBackendResponse": { + "type": "structure", + "members": { + "AmplifyMetaConfig": { + "shape": "__string", + "locationName": "amplifyMetaConfig", + "documentation": "

A stringified version of the current configurations for your Amplify project.

" + }, + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "AppName": { + "shape": "__string", + "locationName": "appName", + "documentation": "

The name of the app.

" + }, + "BackendEnvironmentList": { + "shape": "ListOf__string", + "locationName": "backendEnvironmentList", + "documentation": "

A list of backend environments.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + } + } + }, + "GetTokenRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "SessionId": { + "shape": "__string", + "location": "uri", + "locationName": "sessionId", + "documentation": "

The session ID.

" + } + }, + "required": [ + "SessionId", + "AppId" + ] + }, + "GetTokenRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

The one-time challenge code for authenticating into the Amplify Admin UI.

" + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

A unique ID provided when creating a new challenge token.

" + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

The expiry time for the one-time generated token code.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId", + "Ttl", + "SessionId", + "ChallengeCode" + ] + }, + "GetTokenResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "ChallengeCode": { + "shape": "__string", + "locationName": "challengeCode", + "documentation": "

The one-time challenge code for authenticating into the Amplify Admin UI.

" + }, + "SessionId": { + "shape": "__string", + "locationName": "sessionId", + "documentation": "

A unique ID provided when creating a new challenge token.

" + }, + "Ttl": { + "shape": "__string", + "locationName": "ttl", + "documentation": "

The expiry time for the one-time generated token code.

" + } + } + }, + "InternalServiceException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

An error message indicating that the request failed.

" + } + }, + "documentation": "

An error returned if there's a temporary issue with the service.

" + }, + "LimitExceededException": { + "type": "structure", + "members": { + "LimitType": { + "shape": "__string", + "locationName": "limitType", + "documentation": "

The type of limit that was exceeded.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

An error message indicating that the request failed.

" + } + }, + "documentation": "

An error that is returned when a limit of a specific type is exceeded.

" + }, + "ListBackendJobReqObj": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "MaxResults": { + "shape": "__integerMin1Max25", + "locationName": "maxResults", + "documentation": "

The maximum number of results that you want in the response.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token for the next set of results.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

Filters the list of response objects to include only those with the specified operation name.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

Filters the list of response objects to include only those with the specified status.

" + } + }, + "documentation": "

The request object for this operation.

" + }, + "ListBackendJobRespObj": { + "type": "structure", + "members": { + "Jobs": { + "shape": "ListOfBackendJobRespObj", + "locationName": "jobs", + "documentation": "

An array of jobs and their properties.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token for the next set of results.

" + } + }, + "documentation": "

The returned list of backend jobs.

" + }, + "ListBackendJobsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "MaxResults": { + "shape": "__integerMin1Max25", + "locationName": "maxResults", + "documentation": "

The maximum number of results that you want in the response.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token for the next set of results.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

Filters the list of response objects to include only those with the specified operation name.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

Filters the list of response objects to include only those with the specified status.

" + } + }, + "documentation": "

The request body for ListBackendJobs.

", + "required": [ + "AppId", + "BackendEnvironmentName" + ] + }, + "ListBackendJobsResponse": { + "type": "structure", + "members": { + "Jobs": { + "shape": "ListOfBackendJobRespObj", + "locationName": "jobs", + "documentation": "

An array of jobs and their properties.

" + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The token for the next set of results.

" + } + } + }, + "LoginAuthConfigReqObj": { + "type": "structure", + "members": { + "AwsCognitoIdentityPoolId": { + "shape": "__string", + "locationName": "aws_cognito_identity_pool_id", + "documentation": "

The Amazon Cognito identity pool ID used for Amplify Admin UI login authorization.

" + }, + "AwsCognitoRegion": { + "shape": "__string", + "locationName": "aws_cognito_region", + "documentation": "

The AWS Region for the Amplify Admin UI login.

" + }, + "AwsUserPoolsId": { + "shape": "__string", + "locationName": "aws_user_pools_id", + "documentation": "

The Amazon Cognito user pool ID used for Amplify Admin UI login authentication.

" + }, + "AwsUserPoolsWebClientId": { + "shape": "__string", + "locationName": "aws_user_pools_web_client_id", + "documentation": "

The web client ID for the Amazon Cognito user pools.

" + } + }, + "documentation": "

The request object for this operation.

" + }, + "MFAMode": { + "type": "string", + "enum": [ + "ON", + "OFF", + "OPTIONAL" + ] + }, + "Mode": { + "type": "string", + "enum": [ + "API_KEY", + "AWS_IAM", + "AMAZON_COGNITO_USER_POOLS", + "OPENID_CONNECT" + ] + }, + "NotFoundException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

An error message indicating that the request failed.

" + }, + "ResourceType": { + "shape": "__string", + "locationName": "resourceType", + "documentation": "

The type of resource that wasn't found.

" + } + }, + "documentation": "

An error returned when a specific resource type is not found.

", + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "OAuthGrantType": { + "type": "string", + "enum": [ + "CODE", + "IMPLICIT" + ] + }, + "RemoveAllBackendsReqObj": { + "type": "structure", + "members": { + "CleanAmplifyApp": { + "shape": "__boolean", + "locationName": "cleanAmplifyApp", + "documentation": "

Cleans up the Amplify Console app if this value is set to true.

" + } + }, + "documentation": "

The request object for this operation.

" + }, + "RemoveAllBackendsRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "CleanAmplifyApp": { + "shape": "__boolean", + "locationName": "cleanAmplifyApp", + "documentation": "

Cleans up the Amplify Console app if this value is set to true.

" + } + }, + "documentation": "

The request body for RemoveAllBackends.

", + "required": [ + "AppId" + ] + }, + "RemoveAllBackendsRespObj": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + }, + "documentation": "

The response object for this operation.

", + "required": [ + "AppId" + ] + }, + "RemoveAllBackendsResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "RemoveBackendAuthReqObj": { + "type": "structure", + "members": { + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "ResourceName" + ] + }, + "RemoveBackendConfigRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + } + }, + "required": [ + "AppId" + ] + }, + "RemoveBackendConfigRespObj": { + "type": "structure", + "members": { + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + } + }, + "documentation": "

The response object for this operation.

" + }, + "RemoveBackendConfigResponse": { + "type": "structure", + "members": { + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + } + } + }, + "ResolutionStrategy": { + "type": "string", + "enum": [ + "OPTIMISTIC_CONCURRENCY", + "LAMBDA", + "AUTOMERGE", + "NONE" + ] + }, + "ResourceConfig": { + "type": "structure", + "members": {}, + "documentation": "

Defines the resource configuration for the data model in your Amplify project.

" + }, + "Service": { + "type": "string", + "enum": [ + "COGNITO" + ] + }, + "Settings": { + "type": "structure", + "members": { + "MfaTypes": { + "shape": "ListOfMfaTypesElement", + "locationName": "mfaTypes", + "documentation": "

The supported MFA types.

" + }, + "SmsMessage": { + "shape": "__string", + "locationName": "smsMessage", + "documentation": "

The body of the SMS message.

" + } + }, + "documentation": "

The settings of your MFA configuration for the backend of your Amplify project.

" + }, + "SignInMethod": { + "type": "string", + "enum": [ + "EMAIL", + "EMAIL_AND_PHONE_NUMBER", + "PHONE_NUMBER", + "USERNAME" + ] + }, + "SmsSettings": { + "type": "structure", + "members": { + "SmsMessage": { + "shape": "__string", + "locationName": "smsMessage", + "documentation": "

The body of the SMS message.

" + } + }, + "documentation": "

SMS settings for authentication.

" + }, + "SocialProviderSettings": { + "type": "structure", + "members": { + "Facebook": { + "shape": "BackendAuthSocialProviderConfig" + }, + "Google": { + "shape": "BackendAuthSocialProviderConfig" + }, + "LoginWithAmazon": { + "shape": "BackendAuthSocialProviderConfig" + } + }, + "documentation": "

The settings for using the social identity providers for access to your Amplify app.

" + }, + "Status": { + "type": "string", + "enum": [ + "LATEST", + "STALE" + ] + }, + "TooManyRequestsException": { + "type": "structure", + "members": { + "LimitType": { + "shape": "__string", + "locationName": "limitType", + "documentation": "

The type of limit that was exceeded.

" + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "

An error message indicating that the request failed.

" + } + }, + "documentation": "

An error that is returned when a limit of a specific type is exceeded.

", + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "UpdateBackendAPIRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "BackendAPIResourceConfig", + "locationName": "resourceConfig", + "documentation": "

Defines the resource configuration for the data model in your Amplify project.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for UpdateBackendAPI.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName" + ] + }, + "UpdateBackendAPIResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "UpdateBackendAuthForgotPasswordConfig": { + "type": "structure", + "members": { + "DeliveryMethod": { + "shape": "DeliveryMethod", + "locationName": "deliveryMethod", + "documentation": "

Describes which method to use (either SMS or email) to deliver messages to app users who want to recover their password.

" + }, + "EmailSettings": { + "shape": "EmailSettings", + "locationName": "emailSettings", + "documentation": "

The configuration for the email sent when an app user forgets their password.

" + }, + "SmsSettings": { + "shape": "SmsSettings", + "locationName": "smsSettings", + "documentation": "

The configuration for the SMS message sent when an Amplify app user forgets their password.

" + } + }, + "documentation": "

Describes the forgot password policy for authenticating into the Amplify app.

" + }, + "UpdateBackendAuthIdentityPoolConfig": { + "type": "structure", + "members": { + "UnauthenticatedLogin": { + "shape": "__boolean", + "locationName": "unauthenticatedLogin", + "documentation": "

A Boolean value that you can set to allow or disallow guest-level authorization into your Amplify app.

" + } + }, + "documentation": "

Describes the authorization configuration for the Amazon Cognito identity pool, provisioned as a part of your auth resource in the Amplify project.

" + }, + "UpdateBackendAuthMFAConfig": { + "type": "structure", + "members": { + "MFAMode": { + "shape": "MFAMode", + "documentation": "

The MFA mode for the backend of your Amplify project.

" + }, + "Settings": { + "shape": "Settings", + "locationName": "settings", + "documentation": "

The settings of your MFA configuration for the backend of your Amplify project.

" + } + }, + "documentation": "

Updates the multi-factor authentication (MFA) configuration for the backend of your Amplify project.

" + }, + "UpdateBackendAuthOAuthConfig": { + "type": "structure", + "members": { + "DomainPrefix": { + "shape": "__string", + "locationName": "domainPrefix", + "documentation": "

The Amazon Cognito domain prefix used to create a hosted UI for authentication.

" + }, + "OAuthGrantType": { + "shape": "OAuthGrantType", + "locationName": "oAuthGrantType", + "documentation": "

The OAuth grant type to allow app users to authenticate from your Amplify app.

" + }, + "OAuthScopes": { + "shape": "ListOfOAuthScopesElement", + "locationName": "oAuthScopes", + "documentation": "

The list of OAuth-related flows that can allow users to authenticate from your Amplify app.

" + }, + "RedirectSignInURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignInURIs", + "documentation": "

Redirect URLs that OAuth uses when a user signs in to an Amplify app.

" + }, + "RedirectSignOutURIs": { + "shape": "ListOf__string", + "locationName": "redirectSignOutURIs", + "documentation": "

Redirect URLs that OAuth uses when a user signs out of an Amplify app.

" + }, + "SocialProviderSettings": { + "shape": "SocialProviderSettings", + "locationName": "socialProviderSettings", + "documentation": "

Describes third-party social federation configurations for allowing your users to sign in with OAuth.

" + } + }, + "documentation": "

The OAuth configurations for authenticating users into your Amplify app.

" + }, + "UpdateBackendAuthPasswordPolicyConfig": { + "type": "structure", + "members": { + "AdditionalConstraints": { + "shape": "ListOfAdditionalConstraintsElement", + "locationName": "additionalConstraints", + "documentation": "

Describes additional constraints on the password requirements to sign in to the auth resource, configured as a part of your Amplify project.

" + }, + "MinimumLength": { + "shape": "__double", + "locationName": "minimumLength", + "documentation": "

Describes the minimum length of the password required to sign in to the auth resource, configured as a part of your Amplify project.

" + } + }, + "documentation": "

Describes the password policy for your Amazon Cognito user pool that's configured as a part of your Amplify project.

" + }, + "UpdateBackendAuthReqObj": { + "type": "structure", + "members": { + "ResourceConfig": { + "shape": "UpdateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this request object.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request object for this operation.

", + "required": [ + "ResourceName", + "ResourceConfig" + ] + }, + "UpdateBackendAuthRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "ResourceConfig": { + "shape": "UpdateBackendAuthResourceConfig", + "locationName": "resourceConfig", + "documentation": "

The resource configuration for this request object.

" + }, + "ResourceName": { + "shape": "__string", + "locationName": "resourceName", + "documentation": "

The name of this resource.

" + } + }, + "documentation": "

The request body for UpdateBackendAuth.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "ResourceName", + "ResourceConfig" + ] + }, + "UpdateBackendAuthResourceConfig": { + "type": "structure", + "members": { + "AuthResources": { + "shape": "AuthResources", + "locationName": "authResources", + "documentation": "

Defines the service name to use when configuring an authentication resource in your Amplify project.

" + }, + "IdentityPoolConfigs": { + "shape": "UpdateBackendAuthIdentityPoolConfig", + "locationName": "identityPoolConfigs", + "documentation": "

Describes the authorization configuration for the Amazon Cognito identity pool, provisioned as a part of the auth resource in your Amplify project.

" + }, + "Service": { + "shape": "Service", + "locationName": "service", + "documentation": "

Defines the service name to use when configuring an authentication resource in your Amplify project.

" + }, + "UserPoolConfigs": { + "shape": "UpdateBackendAuthUserPoolConfig", + "locationName": "userPoolConfigs", + "documentation": "

Describes the authentication configuration for the Amazon Cognito user pool, provisioned as a part of the auth resource in your Amplify project.

" + } + }, + "documentation": "

Defines the resource configuration when updating an authentication resource in your Amplify project.

", + "required": [ + "AuthResources", + "UserPoolConfigs", + "Service" + ] + }, + "UpdateBackendAuthResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + } + } + }, + "UpdateBackendAuthUserPoolConfig": { + "type": "structure", + "members": { + "ForgotPassword": { + "shape": "UpdateBackendAuthForgotPasswordConfig", + "locationName": "forgotPassword", + "documentation": "

Describes the forgot password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

" + }, + "Mfa": { + "shape": "UpdateBackendAuthMFAConfig", + "locationName": "mfa", + "documentation": "

Describes whether to apply multi-factor authentication (MFA) policies for your Amazon Cognito user pool that's configured as a part of your Amplify project.

" + }, + "OAuth": { + "shape": "UpdateBackendAuthOAuthConfig", + "locationName": "oAuth", + "documentation": "

Describes the OAuth policy and rules for your Amazon Cognito user pool, configured as a part of your Amplify project.

" + }, + "PasswordPolicy": { + "shape": "UpdateBackendAuthPasswordPolicyConfig", + "locationName": "passwordPolicy", + "documentation": "

Describes the password policy for your Amazon Cognito user pool, configured as a part of your Amplify project.

" + } + }, + "documentation": "

Describes the Amazon Cognito user pool configuration for the authorization resource to be updated in your Amplify project.

" + }, + "UpdateBackendConfigReqObj": { + "type": "structure", + "members": { + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

Describes the Amazon Cognito configuration for Admin UI access.

" + } + }, + "documentation": "

The request object for this operation.

" + }, + "UpdateBackendConfigRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

Describes the Amazon Cognito configuration for Admin UI access.

" + } + }, + "documentation": "

The request body for UpdateBackendConfig.

", + "required": [ + "AppId" + ] + }, + "UpdateBackendConfigResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendManagerAppId": { + "shape": "__string", + "locationName": "backendManagerAppId", + "documentation": "

The app ID for the backend manager.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "LoginAuthConfig": { + "shape": "LoginAuthConfigReqObj", + "locationName": "loginAuthConfig", + "documentation": "

Describes the Amazon Cognito configuration for the Admin UI auth resource used to log in.

" + } + } + }, + "UpdateBackendJobRequest": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "location": "uri", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "location": "uri", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

Filters the list of response objects to include only those with the specified operation name.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

Filters the list of response objects to include only those with the specified status.

" + } + }, + "documentation": "

The request body for UpdateBackendJob.

", + "required": [ + "AppId", + "BackendEnvironmentName", + "JobId" + ] + }, + "UpdateBackendJobResponse": { + "type": "structure", + "members": { + "AppId": { + "shape": "__string", + "locationName": "appId", + "documentation": "

The app ID.

" + }, + "BackendEnvironmentName": { + "shape": "__string", + "locationName": "backendEnvironmentName", + "documentation": "

The name of the backend environment.

" + }, + "CreateTime": { + "shape": "__string", + "locationName": "createTime", + "documentation": "

The time when the job was created.

" + }, + "Error": { + "shape": "__string", + "locationName": "error", + "documentation": "

If the request failed, this is the returned error.

" + }, + "JobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The ID for the job.

" + }, + "Operation": { + "shape": "__string", + "locationName": "operation", + "documentation": "

The name of the operation.

" + }, + "Status": { + "shape": "__string", + "locationName": "status", + "documentation": "

The current status of the request.

" + }, + "UpdateTime": { + "shape": "__string", + "locationName": "updateTime", + "documentation": "

The time when the job was last updated.

" + } + } + }, + "AdditionalConstraintsElement": { + "type": "string", + "enum": [ + "REQUIRE_DIGIT", + "REQUIRE_LOWERCASE", + "REQUIRE_SYMBOL", + "REQUIRE_UPPERCASE" + ] + }, + "MfaTypesElement": { + "type": "string", + "enum": [ + "SMS", + "TOTP" + ] + }, + "OAuthScopesElement": { + "type": "string", + "enum": [ + "PHONE", + "EMAIL", + "OPENID", + "PROFILE", + "AWS_COGNITO_SIGNIN_USER_ADMIN" + ] + }, + "RequiredSignUpAttributesElement": { + "type": "string", + "enum": [ + "ADDRESS", + "BIRTHDATE", + "EMAIL", + "FAMILY_NAME", + "GENDER", + "GIVEN_NAME", + "LOCALE", + "MIDDLE_NAME", + "NAME", + "NICKNAME", + "PHONE_NUMBER", + "PICTURE", + "PREFERRED_USERNAME", + "PROFILE", + "UPDATED_AT", + "WEBSITE", + "ZONE_INFO" + ] + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__integer": { + "type": "integer" + }, + "__integerMin1Max25": { + "type": "integer", + "min": 1, + "max": 25 + }, + "ListOfBackendAPIAuthType": { + "type": "list", + "member": { + "shape": "BackendAPIAuthType" + } + }, + "ListOfBackendJobRespObj": { + "type": "list", + "member": { + "shape": "BackendJobRespObj" + } + }, + "ListOfAdditionalConstraintsElement": { + "type": "list", + "member": { + "shape": "AdditionalConstraintsElement" + } + }, + "ListOfMfaTypesElement": { + "type": "list", + "member": { + "shape": "MfaTypesElement" + } + }, + "ListOfOAuthScopesElement": { + "type": "list", + "member": { + "shape": "OAuthScopesElement" + } + }, + "ListOfRequiredSignUpAttributesElement": { + "type": "list", + "member": { + "shape": "RequiredSignUpAttributesElement" + } + }, + "ListOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "__long": { + "type": "long" + }, + "__string": { + "type": "string" + }, + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" + } + }, + "documentation": "

AWS Amplify Admin API

" +} diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 7a690b617b5b..8f3b7e01495c 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigateway/src/main/resources/codegen-resources/service-2.json b/services/apigateway/src/main/resources/codegen-resources/service-2.json index 5f8f2f2d73eb..04b5dd539a41 100755 --- a/services/apigateway/src/main/resources/codegen-resources/service-2.json +++ b/services/apigateway/src/main/resources/codegen-resources/service-2.json @@ -2145,7 +2145,7 @@ }, "providerARNs":{ "shape":"ListOfARNs", - "documentation":"

A list of the Amazon Cognito user pool ARNs for the COGNITO_USER_POOLS authorizer. Each element is of this format: arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}. For a TOKEN or REQUEST authorizer, this is not defined.

" + "documentation":"

A list of the Amazon Cognito user pool ARNs for the COGNITO_USER_POOLS authorizer. Each element is of this format: arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}. For a TOKEN or REQUEST authorizer, this is not defined.

" }, "authType":{ "shape":"String", @@ -2410,7 +2410,7 @@ }, "providerARNs":{ "shape":"ListOfARNs", - "documentation":"

A list of the Amazon Cognito user pool ARNs for the COGNITO_USER_POOLS authorizer. Each element is of this format: arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}. For a TOKEN or REQUEST authorizer, this is not defined.

" + "documentation":"

A list of the Amazon Cognito user pool ARNs for the COGNITO_USER_POOLS authorizer. Each element is of this format: arn:aws:cognito-idp:{region}:{account_id}:userpool/{user_pool_id}. For a TOKEN or REQUEST authorizer, this is not defined.

" }, "authType":{ "shape":"String", @@ -2603,7 +2603,7 @@ }, "endpointConfiguration":{ "shape":"EndpointConfiguration", - "documentation":"

The endpoint configuration of this DomainName showing the endpoint types of the domain name.

" + "documentation":"

The endpoint configuration of this DomainName showing the endpoint types of the domain name.

" }, "tags":{ "shape":"MapOfStringToString", @@ -2732,11 +2732,11 @@ }, "apiKeySource":{ "shape":"ApiKeySourceType", - "documentation":"

The source of the API key for metering requests according to a usage plan. Valid values are:

  • HEADER to read the API key from the X-API-Key header of a request.
  • AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

" + "documentation":"

The source of the API key for metering requests according to a usage plan. Valid values are:

  • HEADER to read the API key from the X-API-Key header of a request.
  • AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

" }, "endpointConfiguration":{ "shape":"EndpointConfiguration", - "documentation":"

The endpoint configuration of this RestApi showing the endpoint types of the API.

" + "documentation":"

The endpoint configuration of this RestApi showing the endpoint types of the API.

" }, "policy":{ "shape":"String", @@ -2745,6 +2745,10 @@ "tags":{ "shape":"MapOfStringToString", "documentation":"

The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:. The tag value can be up to 256 characters.

" + }, + "disableExecuteApiEndpoint":{ + "shape":"Boolean", + "documentation":"

Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint.

" } }, "documentation":"

The POST Request to add a new RestApi resource to your collection.

" @@ -3557,7 +3561,7 @@ }, "endpointConfiguration":{ "shape":"EndpointConfiguration", - "documentation":"

The endpoint configuration of this DomainName showing the endpoint types of the domain name.

" + "documentation":"

The endpoint configuration of this DomainName showing the endpoint types of the domain name.

" }, "domainNameStatus":{ "shape":"DomainNameStatus", @@ -3717,7 +3721,7 @@ "documentation":"

A Boolean flag to indicate whether this GatewayResponse is the default gateway response (true) or not (false). A default gateway response is one generated by API Gateway without any customization by an API developer.

" } }, - "documentation":"

A gateway response of a given response type and status code, with optional response parameters and mapping templates.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get a Gateway Response of a given response type

Request

This example shows how to get a gateway response of the MISSING_AUTHENTICATION_TOKEN type.

GET /restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T202516Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=1b52460e3159c1a26cff29093855d50ea141c1c5b937528fecaf60f51129697a Cache-Control: no-cache Postman-Token: 3b2a1ce9-c848-2e26-2e2f-9c2caefbed45 

The response type is specified as a URL path.

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:delete\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": false, \"responseParameters\": { \"gatewayresponse.header.x-request-path\": \"method.request.path.petId\", \"gatewayresponse.header.Access-Control-Allow-Origin\": \"'a.b.c'\", \"gatewayresponse.header.x-request-query\": \"method.request.querystring.q\", \"gatewayresponse.header.x-request-header\": \"method.request.header.Accept\" }, \"responseTemplates\": { \"application/json\": \"{\\n \\\"message\\\": $context.error.messageString,\\n \\\"type\\\": \\\"$context.error.responseType\\\",\\n \\\"stage\\\": \\\"$context.stage\\\",\\n \\\"resourcePath\\\": \\\"$context.resourcePath\\\",\\n \\\"stageVariables.a\\\": \\\"$stageVariables.a\\\",\\n \\\"statusCode\\\": \\\"'404'\\\"\\n}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"404\" }

" + "documentation":"

A gateway response of a given response type and status code, with optional response parameters and mapping templates.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get a Gateway Response of a given response type

Request

This example shows how to get a gateway response of the MISSING_AUTHENTICATION_TOKEN type.

GET /restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T202516Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=1b52460e3159c1a26cff29093855d50ea141c1c5b937528fecaf60f51129697a Cache-Control: no-cache Postman-Token: 3b2a1ce9-c848-2e26-2e2f-9c2caefbed45 

The response type is specified as a URL path.

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:delete\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": false, \"responseParameters\": { \"gatewayresponse.header.x-request-path\": \"method.request.path.petId\", \"gatewayresponse.header.Access-Control-Allow-Origin\": \"'a.b.c'\", \"gatewayresponse.header.x-request-query\": \"method.request.querystring.q\", \"gatewayresponse.header.x-request-header\": \"method.request.header.Accept\" }, \"responseTemplates\": { \"application/json\": \"{\\n \\\"message\\\": $context.error.messageString,\\n \\\"type\\\": \\\"$context.error.responseType\\\",\\n \\\"stage\\\": \\\"$context.stage\\\",\\n \\\"resourcePath\\\": \\\"$context.resourcePath\\\",\\n \\\"stageVariables.a\\\": \\\"$stageVariables.a\\\",\\n \\\"statusCode\\\": \\\"'404'\\\"\\n}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"404\" }

" }, "GatewayResponseType":{ "type":"string", @@ -3754,7 +3758,7 @@ "locationName":"item" } }, - "documentation":"

The collection of the GatewayResponse instances of a RestApi as a responseType-to-GatewayResponse object map of key-value pairs. As such, pagination is not supported for querying this collection.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get the collection of gateway responses of an API

Request

This example request shows how to retrieve the GatewayResponses collection from an API.

GET /restapis/o81lxisefl/gatewayresponses HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T220604Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=59b42fe54a76a5de8adf2c67baa6d39206f8e9ad49a1d77ccc6a5da3103a398a Cache-Control: no-cache Postman-Token: 5637af27-dc29-fc5c-9dfe-0645d52cb515 

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"first\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"gatewayresponse:by-type\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"item\": [ { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } ] }, \"_embedded\": { \"item\": [ { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_FAILURE\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"RESOURCE_NOT_FOUND\", \"statusCode\": \"404\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": 
\"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"REQUEST_TOO_LARGE\", \"statusCode\": \"413\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"THROTTLED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNSUPPORTED_MEDIA_TYPE\", \"statusCode\": \"415\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_5XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_4XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_PARAMETERS\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, \"gatewayresponse:put\": { \"href\": 
\"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_BODY\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"EXPIRED_TOKEN\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"ACCESS_DENIED\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_API_KEY\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNAUTHORIZED\", \"statusCode\": \"401\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"API_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": 
\"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"QUOTA_EXCEEDED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_TIMEOUT\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_SIGNATURE\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_FAILURE\", \"statusCode\": \"500\" } ] } }

" + "documentation":"

The collection of the GatewayResponse instances of a RestApi as a responseType-to-GatewayResponse object map of key-value pairs. As such, pagination is not supported for querying this collection.

For more information about valid gateway response types, see Gateway Response Types Supported by API Gateway

Example: Get the collection of gateway responses of an API

Request

This example request shows how to retrieve the GatewayResponses collection from an API.

GET /restapis/o81lxisefl/gatewayresponses HTTP/1.1 Host: beta-apigateway.us-east-1.amazonaws.com Content-Type: application/json X-Amz-Date: 20170503T220604Z Authorization: AWS4-HMAC-SHA256 Credential={access-key-id}/20170503/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=59b42fe54a76a5de8adf2c67baa6d39206f8e9ad49a1d77ccc6a5da3103a398a Cache-Control: no-cache Postman-Token: 5637af27-dc29-fc5c-9dfe-0645d52cb515 

Response

The successful operation returns the 200 OK status code and a payload similar to the following:

{ \"_links\": { \"curies\": { \"href\": \"http://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-gatewayresponse-{rel}.html\", \"name\": \"gatewayresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"first\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses\" }, \"gatewayresponse:by-type\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"item\": [ { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } ] }, \"_embedded\": { \"item\": [ { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_FAILURE\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/RESOURCE_NOT_FOUND\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"RESOURCE_NOT_FOUND\", \"statusCode\": \"404\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/REQUEST_TOO_LARGE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": 
\"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"REQUEST_TOO_LARGE\", \"statusCode\": \"413\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/THROTTLED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"THROTTLED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNSUPPORTED_MEDIA_TYPE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNSUPPORTED_MEDIA_TYPE\", \"statusCode\": \"415\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_CONFIGURATION_ERROR\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_5XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_5XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/DEFAULT_4XX\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"DEFAULT_4XX\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_PARAMETERS\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_PARAMETERS\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" }, \"gatewayresponse:put\": { \"href\": 
\"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/BAD_REQUEST_BODY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"BAD_REQUEST_BODY\", \"statusCode\": \"400\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/EXPIRED_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"EXPIRED_TOKEN\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/ACCESS_DENIED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"ACCESS_DENIED\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_API_KEY\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_API_KEY\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/UNAUTHORIZED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"UNAUTHORIZED\", \"statusCode\": \"401\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/API_CONFIGURATION_ERROR\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"API_CONFIGURATION_ERROR\", \"statusCode\": \"500\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/QUOTA_EXCEEDED\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": 
\"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"QUOTA_EXCEEDED\", \"statusCode\": \"429\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INTEGRATION_TIMEOUT\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INTEGRATION_TIMEOUT\", \"statusCode\": \"504\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/MISSING_AUTHENTICATION_TOKEN\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"MISSING_AUTHENTICATION_TOKEN\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/INVALID_SIGNATURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"INVALID_SIGNATURE\", \"statusCode\": \"403\" }, { \"_links\": { \"self\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" }, \"gatewayresponse:put\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/{response_type}\", \"templated\": true }, \"gatewayresponse:update\": { \"href\": \"/restapis/o81lxisefl/gatewayresponses/AUTHORIZER_FAILURE\" } }, \"defaultResponse\": true, \"responseParameters\": {}, \"responseTemplates\": { \"application/json\": \"{\\\"message\\\":$context.error.messageString}\" }, \"responseType\": \"AUTHORIZER_FAILURE\", \"statusCode\": \"500\" } ] } }

" }, "GenerateClientCertificateRequest":{ "type":"structure", @@ -4983,7 +4987,7 @@ }, "uri":{ "shape":"String", - "documentation":"

Specifies Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" + "documentation":"

Specifies the Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" }, "connectionType":{ "shape":"ConnectionType", @@ -5574,7 +5578,7 @@ }, "uri":{ "shape":"String", - "documentation":"

Specifies Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" + "documentation":"

Specifies the Uniform Resource Identifier (URI) of the integration endpoint.

  • For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing.

  • For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated AWS service (e.g., s3); and {subdomain} is a designated subdomain supported by certain AWS service for fast host-name lookup. action can be used for an AWS service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an AWS service resource, including the region of the integrated AWS service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

" }, "connectionType":{ "shape":"ConnectionType", @@ -5958,19 +5962,23 @@ }, "apiKeySource":{ "shape":"ApiKeySourceType", - "documentation":"

The source of the API key for metering requests according to a usage plan. Valid values are:

  • HEADER to read the API key from the X-API-Key header of a request.
  • AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

" + "documentation":"

The source of the API key for metering requests according to a usage plan. Valid values are:

  • HEADER to read the API key from the X-API-Key header of a request.
  • AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

" }, "endpointConfiguration":{ "shape":"EndpointConfiguration", - "documentation":"

The endpoint configuration of this RestApi showing the endpoint types of the API.

" + "documentation":"

The endpoint configuration of this RestApi showing the endpoint types of the API.

" }, "policy":{ "shape":"String", - "documentation":"A stringified JSON policy document that applies to this RestApi regardless of the caller and Method configuration." + "documentation":"

A stringified JSON policy document that applies to this RestApi regardless of the caller and Method configuration.

" }, "tags":{ "shape":"MapOfStringToString", "documentation":"

The collection of tags. Each tag element is associated with a given resource.

" + }, + "disableExecuteApiEndpoint":{ + "shape":"Boolean", + "documentation":"

Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint.

" } }, "documentation":"

Represents a REST API.

" @@ -7146,7 +7154,7 @@ }, "status":{ "shape":"VpcLinkStatus", - "documentation":"

The status of the VPC link. The valid values are AVAILABLE, PENDING, DELETING, or FAILED. Deploying an API will wait if the status is PENDING and will fail if the status is DELETING.

" + "documentation":"

The status of the VPC link. The valid values are AVAILABLE, PENDING, DELETING, or FAILED. Deploying an API will wait if the status is PENDING and will fail if the status is DELETING.

" }, "statusMessage":{ "shape":"String", @@ -7157,7 +7165,7 @@ "documentation":"

The collection of tags. Each tag element is associated with a given resource.

" } }, - "documentation":"

An API Gateway VPC link for a RestApi to access resources in an Amazon Virtual Private Cloud (VPC).

To enable access to a resource in an Amazon Virtual Private Cloud through Amazon API Gateway, you, as an API developer, create a VpcLink resource targeted for one or more network load balancers of the VPC and then integrate an API method with a private integration that uses the VpcLink. The private integration has an integration type of HTTP or HTTP_PROXY and has a connection type of VPC_LINK. The integration uses the connectionId property to identify the VpcLink used.

" + "documentation":"

An API Gateway VPC link for a RestApi to access resources in an Amazon Virtual Private Cloud (VPC).

To enable access to a resource in an Amazon Virtual Private Cloud through Amazon API Gateway, you, as an API developer, create a VpcLink resource targeted for one or more network load balancers of the VPC and then integrate an API method with a private integration that uses the VpcLink. The private integration has an integration type of HTTP or HTTP_PROXY and has a connection type of VPC_LINK. The integration uses the connectionId property to identify the VpcLink used.

" }, "VpcLinkStatus":{ "type":"string", diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index 4eff9d3ec8d8..3b10e1157aa0 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index e2dc0f6c8658..5805605f16aa 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 81b82c652032..da184118e214 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index 3f68bc6d19f5..0011992ae079 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appflow/src/main/resources/codegen-resources/service-2.json b/services/appflow/src/main/resources/codegen-resources/service-2.json index fa8e5e28ddaa..b1c0d1dd5f7f 100644 --- a/services/appflow/src/main/resources/codegen-resources/service-2.json +++ b/services/appflow/src/main/resources/codegen-resources/service-2.json @@ -420,6 +420,13 @@ "max":512, "pattern":".*" }, + "ClientCredentialsArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:secretsmanager:.*:[0-9]+:.*", + "sensitive":true + }, "ClientId":{ "type":"string", "max":512, @@ -632,6 +639,10 @@ "EventBridge":{ "shape":"EventBridgeMetadata", "documentation":"

The connector metadata specific to Amazon EventBridge.

" + }, + "Upsolver":{ + "shape":"UpsolverMetadata", + "documentation":"

The connector metadata specific to Upsolver.

" } }, "documentation":"

A structure to specify connector-specific metadata such as oAuthScopes, supportedRegions, privateLinkServiceUrl, and so on.

" @@ -948,7 +959,8 @@ "Infornexus", "Amplitude", "Veeva", - "EventBridge" + "EventBridge", + "Upsolver" ] }, "ConnectorTypeList":{ @@ -1138,6 +1150,11 @@ "documentation":"

The properties that are applied when Datadog is being used as a source.

" }, "Date":{"type":"timestamp"}, + "DatetimeTypeFieldName":{ + "type":"string", + "max":256, + "pattern":".*" + }, "DeleteConnectorProfileRequest":{ "type":"structure", "required":["connectorProfileName"], @@ -1401,6 +1418,10 @@ "EventBridge":{ "shape":"EventBridgeDestinationProperties", "documentation":"

The properties required to query Amazon EventBridge.

" + }, + "Upsolver":{ + "shape":"UpsolverDestinationProperties", + "documentation":"

The properties required to query Upsolver.

" } }, "documentation":"

This stores the information that is required to query a particular connector.

" @@ -1424,6 +1445,14 @@ "isUpsertable":{ "shape":"Boolean", "documentation":"

Specifies if the flow run can either insert new rows in the destination field if they do not already exist, or update them if they do.

" + }, + "isUpdatable":{ + "shape":"Boolean", + "documentation":"

Specifies whether the field can be updated during an UPDATE or UPSERT write operation.

" + }, + "supportedWriteOperations":{ + "shape":"SupportedWriteOperationList", + "documentation":"

A list of supported write operations. For each write operation listed, this field can be used in idFieldNames when that write operation is present as a destination option.

" } }, "documentation":"

The properties that can be applied to a field when connector is being used as a destination.

" @@ -1809,7 +1838,7 @@ }, "clientSecret":{ "shape":"ClientSecret", - "documentation":"

The client secret used by the oauth client to authenticate to the authorization server.

" + "documentation":"

The client secret used by the OAuth client to authenticate to the authorization server.

" }, "accessToken":{ "shape":"AccessToken", @@ -1821,7 +1850,7 @@ }, "oAuthRequest":{ "shape":"ConnectorOAuthRequest", - "documentation":"

The oauth requirement needed to request security tokens from the connector endpoint.

" + "documentation":"

The OAuth requirement needed to request security tokens from the connector endpoint.

" } }, "documentation":"

The connector-specific profile credentials required by Google Analytics.

" @@ -1858,11 +1887,28 @@ "max":128, "pattern":"\\S+" }, + "IdFieldNameList":{ + "type":"list", + "member":{"shape":"Name"}, + "documentation":"

A list of field names that can be used as an ID field when performing a write operation.

", + "max":1, + "min":0 + }, "Identifier":{ "type":"string", "max":128, "pattern":"\\S+" }, + "IncrementalPullConfig":{ + "type":"structure", + "members":{ + "datetimeTypeFieldName":{ + "shape":"DatetimeTypeFieldName", + "documentation":"

A field that specifies the date time or timestamp field as the criteria to use when importing incremental records from the source.

" + } + }, + "documentation":"

Specifies the configuration used when importing incremental records from the source.

" + }, "InforNexusConnectorOperator":{ "type":"string", "enum":[ @@ -2078,7 +2124,7 @@ }, "clientSecret":{ "shape":"ClientSecret", - "documentation":"

The client secret used by the oauth client to authenticate to the authorization server.

" + "documentation":"

The client secret used by the OAuth client to authenticate to the authorization server.

" }, "accessToken":{ "shape":"AccessToken", @@ -2086,7 +2132,7 @@ }, "oAuthRequest":{ "shape":"ConnectorOAuthRequest", - "documentation":"

The oauth requirement needed to request security tokens from the connector endpoint.

" + "documentation":"

The OAuth requirement needed to request security tokens from the connector endpoint.

" } }, "documentation":"

The connector-specific profile credentials required by Marketo.

" @@ -2472,7 +2518,11 @@ }, "oAuthRequest":{ "shape":"ConnectorOAuthRequest", - "documentation":"

The oauth requirement needed to request security tokens from the connector endpoint.

" + "documentation":"

The OAuth requirement needed to request security tokens from the connector endpoint.

" + }, + "clientCredentialsArn":{ + "shape":"ClientCredentialsArn", + "documentation":"

The secret manager ARN, which contains the client ID and client secret of the connected app.

" } }, "documentation":"

The connector-specific profile credentials required when using Salesforce.

" @@ -2499,9 +2549,17 @@ "shape":"Object", "documentation":"

The object specified in the Salesforce flow destination.

" }, + "idFieldNames":{ + "shape":"IdFieldNameList", + "documentation":"

The name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update or delete.

" + }, "errorHandlingConfig":{ "shape":"ErrorHandlingConfig", "documentation":"

The settings that determine how Amazon AppFlow handles an error when placing data in the Salesforce destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.

" + }, + "writeOperationType":{ + "shape":"WriteOperationType", + "documentation":"

This specifies the type of write operation to be performed in Salesforce. When the value is UPSERT, then idFieldNames is required.

" } }, "documentation":"

The properties that are applied when Salesforce is being used as a destination.

" @@ -2557,7 +2615,7 @@ "members":{ "scheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

The scheduling expression that determines when and how often the rule runs.

" + "documentation":"

The scheduling expression that determines the rate at which the schedule will run, for example rate(5minutes).

" }, "dataPullMode":{ "shape":"DataPullMode", @@ -2759,7 +2817,7 @@ }, "clientSecret":{ "shape":"ClientSecret", - "documentation":"

The client secret used by the oauth client to authenticate to the authorization server.

" + "documentation":"

The client secret used by the OAuth client to authenticate to the authorization server.

" }, "accessToken":{ "shape":"AccessToken", @@ -2767,7 +2825,7 @@ }, "oAuthRequest":{ "shape":"ConnectorOAuthRequest", - "documentation":"

The oauth requirement needed to request security tokens from the connector endpoint.

" + "documentation":"

The OAuth requirement needed to request security tokens from the connector endpoint.

" } }, "documentation":"

The connector-specific profile credentials required when using Slack.

" @@ -2995,6 +3053,10 @@ "sourceConnectorProperties":{ "shape":"SourceConnectorProperties", "documentation":"

Specifies the information that is required to query a particular source connector.

" + }, + "incrementalPullConfig":{ + "shape":"IncrementalPullConfig", + "documentation":"

Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull.

" } }, "documentation":"

Contains information about the configuration of the source connector used in the flow.

" @@ -3024,6 +3086,10 @@ "flowStatus":{ "shape":"FlowStatus", "documentation":"

Indicates the current status of the flow.

" + }, + "executionId":{ + "shape":"ExecutionId", + "documentation":"

Returns the internal execution ID of an on-demand flow when the flow is started. For scheduled or event-triggered flows, this value is null.

" } } }, @@ -3070,6 +3136,10 @@ "type":"list", "member":{"shape":"Value"} }, + "SupportedWriteOperationList":{ + "type":"list", + "member":{"shape":"WriteOperationType"} + }, "TagKey":{ "type":"string", "max":128, @@ -3171,7 +3241,8 @@ }, "Timezone":{ "type":"string", - "max":256 + "max":256, + "pattern":".*" }, "TrendmicroConnectorOperator":{ "type":"string", @@ -3375,6 +3446,53 @@ "max":256, "pattern":"\\S+" }, + "UpsolverBucketName":{ + "type":"string", + "max":63, + "min":16, + "pattern":"^(upsolver-appflow)\\S*" + }, + "UpsolverDestinationProperties":{ + "type":"structure", + "required":[ + "bucketName", + "s3OutputFormatConfig" + ], + "members":{ + "bucketName":{ + "shape":"UpsolverBucketName", + "documentation":"

The Upsolver Amazon S3 bucket name in which Amazon AppFlow places the transferred data.

" + }, + "bucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

The object key for the destination Upsolver Amazon S3 bucket in which Amazon AppFlow places the files.

" + }, + "s3OutputFormatConfig":{ + "shape":"UpsolverS3OutputFormatConfig", + "documentation":"

The configuration that determines how data is formatted when Upsolver is used as the flow destination.

" + } + }, + "documentation":"

The properties that are applied when Upsolver is used as a destination.

" + }, + "UpsolverMetadata":{ + "type":"structure", + "members":{ + }, + "documentation":"

The connector metadata specific to Upsolver.

" + }, + "UpsolverS3OutputFormatConfig":{ + "type":"structure", + "required":["prefixConfig"], + "members":{ + "fileType":{ + "shape":"FileType", + "documentation":"

Indicates the file type that Amazon AppFlow places in the Upsolver Amazon S3 bucket.

" + }, + "prefixConfig":{"shape":"PrefixConfig"}, + "aggregationConfig":{"shape":"AggregationConfig"} + }, + "documentation":"

The configuration that determines how Amazon AppFlow formats the flow output data when Upsolver is used as the destination.
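A hedged sketch of an Upsolver destination built from these shapes. The enum constants (file type, prefix type, aggregation type) come from the wider AppFlow model; the bucket name is a placeholder chosen to satisfy the upsolver-appflow prefix and length constraints above.

```java
import software.amazon.awssdk.services.appflow.model.AggregationConfig;
import software.amazon.awssdk.services.appflow.model.AggregationType;
import software.amazon.awssdk.services.appflow.model.FileType;
import software.amazon.awssdk.services.appflow.model.PrefixConfig;
import software.amazon.awssdk.services.appflow.model.PrefixType;
import software.amazon.awssdk.services.appflow.model.UpsolverDestinationProperties;
import software.amazon.awssdk.services.appflow.model.UpsolverS3OutputFormatConfig;

public class UpsolverDestinationSketch {
    public static void main(String[] args) {
        UpsolverDestinationProperties upsolver = UpsolverDestinationProperties.builder()
                .bucketName("upsolver-appflow-example")      // must start with "upsolver-appflow"
                .bucketPrefix("appflow-output")
                .s3OutputFormatConfig(UpsolverS3OutputFormatConfig.builder()
                        .fileType(FileType.PARQUET)
                        .prefixConfig(PrefixConfig.builder()
                                .prefixType(PrefixType.PATH)
                                .build())
                        .aggregationConfig(AggregationConfig.builder()
                                .aggregationType(AggregationType.NONE)
                                .build())
                        .build())
                .build();
        System.out.println(upsolver);
    }
}
```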

" + }, "Username":{ "type":"string", "max":512, @@ -3471,6 +3589,15 @@ "max":512, "pattern":"[\\s\\w/!@#+=.-]*" }, + "WriteOperationType":{ + "type":"string", + "documentation":"

The possible write operations in the destination connector. When this value is not provided, it defaults to the INSERT operation.

", + "enum":[ + "INSERT", + "UPSERT", + "UPDATE" + ] + }, "ZendeskConnectorOperator":{ "type":"string", "enum":[ @@ -3503,7 +3630,7 @@ }, "clientSecret":{ "shape":"ClientSecret", - "documentation":"

The client secret used by the oauth client to authenticate to the authorization server.

" + "documentation":"

The client secret used by the OAuth client to authenticate to the authorization server.

" }, "accessToken":{ "shape":"AccessToken", @@ -3511,7 +3638,7 @@ }, "oAuthRequest":{ "shape":"ConnectorOAuthRequest", - "documentation":"

The oauth requirement needed to request security tokens from the connector endpoint.

" + "documentation":"

The OAuth requirement needed to request security tokens from the connector endpoint.

" } }, "documentation":"

The connector-specific profile credentials required when using Zendesk.

" diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml new file mode 100644 index 000000000000..74e86092d2f1 --- /dev/null +++ b/services/appintegrations/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + appintegrations + AWS Java SDK :: Services :: App Integrations + The AWS Java SDK for App Integrations module holds the client classes that are used for + communicating with App Integrations. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.appintegrations + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/appintegrations/src/main/resources/codegen-resources/paginators-1.json b/services/appintegrations/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/appintegrations/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/appintegrations/src/main/resources/codegen-resources/service-2.json b/services/appintegrations/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a35f0586f70b --- /dev/null +++ b/services/appintegrations/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,675 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-29", + "endpointPrefix":"app-integrations", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon AppIntegrations Service", + "serviceId":"AppIntegrations", + "signatureVersion":"v4", + "signingName":"app-integrations", + "uid":"appintegrations-2020-07-29" + }, + "operations":{ + "CreateEventIntegration":{ + "name":"CreateEventIntegration", + "http":{ + "method":"POST", + "requestUri":"/eventIntegrations" + }, + "input":{"shape":"CreateEventIntegrationRequest"}, + "output":{"shape":"CreateEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ResourceQuotaExceededException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Creates an EventIntegration, given a specified name, description, and a reference to an Amazon EventBridge bus in your account and a partner event source that will push events to that bus. No objects are created in your account, only metadata that is persisted on the EventIntegration control plane.
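A minimal sketch of calling this operation with the generated v2 client. The member names mirror the request shape defined later in this model; the client class name, bus name, and partner source value are assumptions (the source only needs to match the aws.partner/ pattern in the model).

```java
import java.util.Map;
import software.amazon.awssdk.services.appintegrations.AppIntegrationsClient;
import software.amazon.awssdk.services.appintegrations.model.CreateEventIntegrationRequest;
import software.amazon.awssdk.services.appintegrations.model.CreateEventIntegrationResponse;
import software.amazon.awssdk.services.appintegrations.model.EventFilter;

public class CreateEventIntegrationSketch {
    public static void main(String[] args) {
        try (AppIntegrationsClient client = AppIntegrationsClient.create()) {
            CreateEventIntegrationResponse response = client.createEventIntegration(
                    CreateEventIntegrationRequest.builder()
                            .name("my-event-integration")
                            .description("Routes partner events to my bus")
                            // Source must match the aws.partner/ pattern from the model.
                            .eventFilter(EventFilter.builder()
                                    .source("aws.partner/examplecorp")   // hypothetical partner source
                                    .build())
                            .eventBridgeBus("default")
                            .tags(Map.of("team", "integrations"))
                            .build());
            System.out.println(response.eventIntegrationArn());
        }
    }
}
```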

" + }, + "DeleteEventIntegration":{ + "name":"DeleteEventIntegration", + "http":{ + "method":"DELETE", + "requestUri":"/eventIntegrations/{Name}" + }, + "input":{"shape":"DeleteEventIntegrationRequest"}, + "output":{"shape":"DeleteEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Deletes the specified existing event integration. If the event integration is associated with clients, the request is rejected.

" + }, + "GetEventIntegration":{ + "name":"GetEventIntegration", + "http":{ + "method":"GET", + "requestUri":"/eventIntegrations/{Name}" + }, + "input":{"shape":"GetEventIntegrationRequest"}, + "output":{"shape":"GetEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Returns information about the event integration.

" + }, + "ListEventIntegrationAssociations":{ + "name":"ListEventIntegrationAssociations", + "http":{ + "method":"GET", + "requestUri":"/eventIntegrations/{Name}/associations" + }, + "input":{"shape":"ListEventIntegrationAssociationsRequest"}, + "output":{"shape":"ListEventIntegrationAssociationsResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Returns a paginated list of event integration associations in the account.

" + }, + "ListEventIntegrations":{ + "name":"ListEventIntegrations", + "http":{ + "method":"GET", + "requestUri":"/eventIntegrations" + }, + "input":{"shape":"ListEventIntegrationsRequest"}, + "output":{"shape":"ListEventIntegrationsResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Returns a paginated list of event integrations in the account.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Lists the tags for the specified resource.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Adds the specified tags to the specified resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Removes the specified tags from the specified resource.

" + }, + "UpdateEventIntegration":{ + "name":"UpdateEventIntegration", + "http":{ + "method":"PATCH", + "requestUri":"/eventIntegrations/{Name}" + }, + "input":{"shape":"UpdateEventIntegrationRequest"}, + "output":{"shape":"UpdateEventIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

Updates the description of an event integration.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "Arn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "ClientAssociationMetadata":{ + "type":"map", + "key":{"shape":"NonBlankString"}, + "value":{"shape":"NonBlankString"} + }, + "ClientId":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "CreateEventIntegrationRequest":{ + "type":"structure", + "required":[ + "Name", + "EventFilter", + "EventBridgeBus" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the event integration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The description of the event integration.

" + }, + "EventFilter":{ + "shape":"EventFilter", + "documentation":"

The event filter.

" + }, + "EventBridgeBus":{ + "shape":"EventBridgeBus", + "documentation":"

The EventBridge bus.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + } + } + }, + "CreateEventIntegrationResponse":{ + "type":"structure", + "members":{ + "EventIntegrationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the event integration.

" + } + } + }, + "DeleteEventIntegrationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the event integration.

", + "location":"uri", + "locationName":"Name" + } + } + }, + "DeleteEventIntegrationResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".*" + }, + "DuplicateResourceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

A resource with the specified name already exists.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "EventBridgeBus":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, + "EventBridgeRuleName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, + "EventFilter":{ + "type":"structure", + "required":["Source"], + "members":{ + "Source":{ + "shape":"Source", + "documentation":"

The source of the events.

" + } + }, + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The event filter.

" + }, + "EventIntegration":{ + "type":"structure", + "members":{ + "EventIntegrationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the event integration.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name of the event integration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The event integration description.

" + }, + "EventFilter":{ + "shape":"EventFilter", + "documentation":"

The event integration filter.

" + }, + "EventBridgeBus":{ + "shape":"EventBridgeBus", + "documentation":"

The Amazon EventBridge bus for the event integration.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags.

" + } + }, + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The event integration.

" + }, + "EventIntegrationAssociation":{ + "type":"structure", + "members":{ + "EventIntegrationAssociationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the event integration association.

" + }, + "EventIntegrationAssociationId":{ + "shape":"UUID", + "documentation":"

The identifier for the event integration association.

" + }, + "EventIntegrationName":{ + "shape":"Name", + "documentation":"

The name of the event integration.

" + }, + "ClientId":{ + "shape":"ClientId", + "documentation":"

The identifier for the client that is associated with the event integration.

" + }, + "EventBridgeRuleName":{ + "shape":"EventBridgeRuleName", + "documentation":"

The name of the EventBridge rule.

" + }, + "ClientAssociationMetadata":{ + "shape":"ClientAssociationMetadata", + "documentation":"

The metadata associated with the client.

" + } + }, + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The event integration association.

" + }, + "EventIntegrationAssociationsList":{ + "type":"list", + "member":{"shape":"EventIntegrationAssociation"}, + "max":50, + "min":1 + }, + "EventIntegrationsList":{ + "type":"list", + "member":{"shape":"EventIntegration"}, + "max":50, + "min":1 + }, + "GetEventIntegrationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the event integration.

", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetEventIntegrationResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the event integration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The description of the event integration.

" + }, + "EventIntegrationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the event integration.

" + }, + "EventBridgeBus":{ + "shape":"EventBridgeBus", + "documentation":"

The EventBridge bus.

" + }, + "EventFilter":{ + "shape":"EventFilter", + "documentation":"

The event filter.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + } + } + }, + "IdempotencyToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*" + }, + "InternalServiceError":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

Request processing failed due to an error or failure with the service.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The request is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListEventIntegrationAssociationsRequest":{ + "type":"structure", + "required":["EventIntegrationName"], + "members":{ + "EventIntegrationName":{ + "shape":"Name", + "documentation":"

The name of the event integration.

", + "location":"uri", + "locationName":"Name" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListEventIntegrationAssociationsResponse":{ + "type":"structure", + "members":{ + "EventIntegrationAssociations":{ + "shape":"EventIntegrationAssociationsList", + "documentation":"

The event integration associations.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListEventIntegrationsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListEventIntegrationsResponse":{ + "type":"structure", + "members":{ + "EventIntegrations":{ + "shape":"EventIntegrationsList", + "documentation":"

The event integrations.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.
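Because the paginators file for this service is empty in this change, callers page manually with nextToken and maxResults. A sketch of that loop, assuming the standard generated client:

```java
import software.amazon.awssdk.services.appintegrations.AppIntegrationsClient;
import software.amazon.awssdk.services.appintegrations.model.EventIntegration;
import software.amazon.awssdk.services.appintegrations.model.ListEventIntegrationsRequest;
import software.amazon.awssdk.services.appintegrations.model.ListEventIntegrationsResponse;

public class ListEventIntegrationsSketch {
    public static void main(String[] args) {
        try (AppIntegrationsClient client = AppIntegrationsClient.create()) {
            String nextToken = null;
            do {
                ListEventIntegrationsResponse page = client.listEventIntegrations(
                        ListEventIntegrationsRequest.builder()
                                .maxResults(50)          // page size; 1-50 per the model
                                .nextToken(nextToken)    // null on the first request
                                .build());
                for (EventIntegration integration : page.eventIntegrations()) {
                    System.out.println(integration.name());
                }
                nextToken = page.nextToken();            // absent when there are no more results
            } while (nextToken != null);
        }
    }
}
```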

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

Information about the tags.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "Message":{"type":"string"}, + "Name":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, + "NextToken":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".*" + }, + "NonBlankString":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*\\S.*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The specified resource was not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The allowed quota for the resource has been exceeded.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Source":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^aws\\.partner\\/.*$" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The throttling limit has been exceeded.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UUID":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateEventIntegrationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the event integration.

", + "location":"uri", + "locationName":"Name" + }, + "Description":{ + "shape":"Description", + "documentation":"

The description of the event integration.

" + } + } + }, + "UpdateEventIntegrationResponse":{ + "type":"structure", + "members":{ + } + } + }, + "documentation":"

The Amazon AppIntegrations APIs are in preview release and are subject to change.

The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations in the Amazon Connect Administrator Guide.

" +} diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 40cfecdb20dc..c325c13913f0 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index b08bdeee9fbc..0c2bbbe1f1b0 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 1113a8d5849d..f0dcf5ff8437 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/applicationinsights/src/main/resources/codegen-resources/service-2.json b/services/applicationinsights/src/main/resources/codegen-resources/service-2.json index 1c847097a688..6780ae22c3bd 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/service-2.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, {"shape":"InternalServerException"}, - {"shape":"TagsAlreadyExistException"} + {"shape":"TagsAlreadyExistException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

Adds an application that is created from a resource group.

" }, @@ -426,11 +427,20 @@ } }, "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMsg"} + }, + "documentation":"

The user does not have permission to perform this action.

", + "exception":true + }, "AffectedResource":{"type":"string"}, "AmazonResourceName":{ "type":"string", "max":1011, - "min":1 + "min":1, + "pattern":"^arn:aws(-\\w+)*:[\\w\\d-]+:([\\w\\d-]*)?:[\\w\\d_-]*([:/].+)*$" }, "ApplicationComponent":{ "type":"structure", @@ -439,10 +449,18 @@ "shape":"ComponentName", "documentation":"

The name of the component.

" }, + "ComponentRemarks":{ + "shape":"Remarks", + "documentation":"

If logging is supported for the resource type, this indicates whether the component has configured logs to be monitored.

" + }, "ResourceType":{ "shape":"ResourceType", "documentation":"

The resource type. Supported resource types include EC2 instances, Auto Scaling group, Classic ELB, Application ELB, and SQS Queue.

" }, + "OsType":{ + "shape":"OsType", + "documentation":"

The operating system of the component.

" + }, "Tier":{ "shape":"Tier", "documentation":"

The stack tier of the application component.

" @@ -450,6 +468,10 @@ "Monitor":{ "shape":"Monitor", "documentation":"

Indicates whether the application component is monitored.

" + }, + "DetectedWorkload":{ + "shape":"DetectedWorkload", + "documentation":"

Workloads detected in the application component.

" } }, "documentation":"

Describes a standalone resource or similarly grouped resources that the application is made up of.

" @@ -508,7 +530,8 @@ "enum":[ "EC2", "CODE_DEPLOY", - "HEALTH" + "HEALTH", + "RDS" ] }, "CodeDeployApplication":{"type":"string"}, @@ -519,9 +542,15 @@ "ComponentConfiguration":{ "type":"string", "max":10000, - "min":1 + "min":1, + "pattern":"[\\S\\s]+" + }, + "ComponentName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"(?:^[\\d\\w\\-_\\.+]*$)|(?:^arn:aws(-\\w+)*:[\\w\\d-]+:([\\w\\d-]*)?:[\\w\\d_-]*([:/].+)*$)" }, - "ComponentName":{"type":"string"}, "ConfigurationEvent":{ "type":"structure", "members":{ @@ -563,6 +592,7 @@ "type":"string", "enum":[ "CLOUDWATCH_ALARM", + "CLOUDWATCH_LOG", "CLOUDFORMATION", "SSM_ASSOCIATION" ] @@ -624,7 +654,7 @@ "documentation":"

The name of the resource group.

" }, "ComponentName":{ - "shape":"ComponentName", + "shape":"CustomComponentName", "documentation":"

The name of the component.

" }, "ResourceList":{ @@ -662,11 +692,11 @@ }, "Pattern":{ "shape":"LogPatternRegex", - "documentation":"

The log pattern.

" + "documentation":"

The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.

" }, "Rank":{ "shape":"LogPatternRank", - "documentation":"

Rank of the log pattern.

" + "documentation":"

Rank of the log pattern. Must be a value between 1 and 1,000,000. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1 will be the first to get matched to a log line. A pattern of rank 1,000,000 will be last to get matched. When you configure custom log patterns from the console, a Low severity pattern translates to a 750,000 rank. A Medium severity pattern translates to a 500,000 rank. And a High severity pattern translates to a 250,000 rank. Rank values less than 1 or greater than 1,000,000 are reserved for AWS-provided patterns.
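To illustrate how this rank guidance maps to a request, here is a sketch using the Application Insights v2 client; the resource group, pattern set, and pattern values are placeholders, and the rank mirrors the console's High-severity mapping described above.

```java
import software.amazon.awssdk.services.applicationinsights.ApplicationInsightsClient;
import software.amazon.awssdk.services.applicationinsights.model.CreateLogPatternRequest;

public class CreateLogPatternSketch {
    public static void main(String[] args) {
        try (ApplicationInsightsClient client = ApplicationInsightsClient.create()) {
            client.createLogPattern(CreateLogPatternRequest.builder()
                    .resourceGroupName("my-resource-group")   // hypothetical resource group
                    .patternSetName("my-pattern-set")
                    .patternName("OutOfMemory")
                    // DFA-compatible regex: no lookahead or backreferences.
                    .pattern("OutOfMemoryError")
                    // Lower rank = higher priority; console "High" severity maps to 250,000.
                    .rank(250_000)
                    .build());
        }
    }
}
```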

" } } }, @@ -683,6 +713,12 @@ } } }, + "CustomComponentName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[\\d\\w\\-_\\.+]*$" + }, "DeleteApplicationRequest":{ "type":"structure", "required":["ResourceGroupName"], @@ -710,7 +746,7 @@ "documentation":"

The name of the resource group.

" }, "ComponentName":{ - "shape":"ComponentName", + "shape":"CustomComponentName", "documentation":"

The name of the component.

" } } @@ -950,6 +986,15 @@ } } }, + "DetectedWorkload":{ + "type":"map", + "key":{"shape":"Tier"}, + "value":{"shape":"WorkloadMetaData"} + }, + "EbsCause":{"type":"string"}, + "EbsEvent":{"type":"string"}, + "EbsRequestId":{"type":"string"}, + "EbsResult":{"type":"string"}, "Ec2State":{"type":"string"}, "EndTime":{"type":"timestamp"}, "ErrorMsg":{"type":"string"}, @@ -1232,19 +1277,19 @@ "members":{ "PatternSetName":{ "shape":"LogPatternSetName", - "documentation":"

The name of the log pattern. A log pattern name can contains at many as 30 characters, and it cannot be empty. The characters can be Unicode letters, digits or one of the following symbols: period, dash, underscore.

" + "documentation":"

The name of the log pattern. A log pattern name can contain as many as 30 characters, and it cannot be empty. The characters can be Unicode letters, digits, or one of the following symbols: period, dash, underscore.

" }, "PatternName":{ "shape":"LogPatternName", - "documentation":"

The name of the log pattern. A log pattern name can contains at many as 50 characters, and it cannot be empty. The characters can be Unicode letters, digits or one of the following symbols: period, dash, underscore.

" + "documentation":"

The name of the log pattern. A log pattern name can contain as many as 50 characters, and it cannot be empty. The characters can be Unicode letters, digits, or one of the following symbols: period, dash, underscore.

" }, "Pattern":{ "shape":"LogPatternRegex", - "documentation":"

A regular expression that defines the log pattern. A log pattern can contains at many as 50 characters, and it cannot be empty.

" + "documentation":"

A regular expression that defines the log pattern. A log pattern can contain as many as 50 characters, and it cannot be empty. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.

" }, "Rank":{ "shape":"LogPatternRank", - "documentation":"

Rank of the log pattern.

" + "documentation":"

Rank of the log pattern. Must be a value between 1 and 1,000,000. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1 will be the first to get matched to a log line. A pattern of rank 1,000,000 will be last to get matched. When you configure custom log patterns from the console, a Low severity pattern translates to a 750,000 rank. A Medium severity pattern translates to a 500,000 rank. And a High severity pattern translates to a 250,000 rank. Rank values less than 1 or greater than 1,000,000 are reserved for AWS-provided patterns.

" } }, "documentation":"

An object that defines the log patterns that belongs to a LogPatternSet.

" @@ -1263,7 +1308,8 @@ "LogPatternRegex":{ "type":"string", "max":50, - "min":1 + "min":1, + "pattern":"[\\S\\s]+" }, "LogPatternSetList":{ "type":"list", @@ -1281,10 +1327,11 @@ "max":40, "min":1 }, + "MetaDataKey":{"type":"string"}, + "MetaDataValue":{"type":"string"}, "MetricName":{"type":"string"}, "MetricNamespace":{"type":"string"}, "Monitor":{"type":"boolean"}, - "NewComponentName":{"type":"string"}, "Observation":{ "type":"structure", "members":{ @@ -1396,6 +1443,50 @@ "shape":"Ec2State", "documentation":"

The state of the instance, such as STOPPING or TERMINATING.

" }, + "RdsEventCategories":{ + "shape":"RdsEventCategories", + "documentation":"

The category of an RDS event.

" + }, + "RdsEventMessage":{ + "shape":"RdsEventMessage", + "documentation":"

The message of an RDS event.

" + }, + "S3EventName":{ + "shape":"S3EventName", + "documentation":"

The name of the S3 CloudWatch Event-based observation.

" + }, + "StatesExecutionArn":{ + "shape":"StatesExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the step function execution-based observation.

" + }, + "StatesArn":{ + "shape":"StatesArn", + "documentation":"

The Amazon Resource Name (ARN) of the step function-based observation.

" + }, + "StatesStatus":{ + "shape":"StatesStatus", + "documentation":"

The status of the step function-related observation.

" + }, + "StatesInput":{ + "shape":"StatesInput", + "documentation":"

The input to the step function-based observation.

" + }, + "EbsEvent":{ + "shape":"EbsEvent", + "documentation":"

The type of EBS CloudWatch event, such as createVolume, deleteVolume, or attachVolume.

" + }, + "EbsResult":{ + "shape":"EbsResult", + "documentation":"

The result of an EBS CloudWatch event, such as failed or succeeded.

" + }, + "EbsCause":{ + "shape":"EbsCause", + "documentation":"

The cause of an EBS CloudWatch event.

" + }, + "EbsRequestId":{ + "shape":"EbsRequestId", + "documentation":"

The request ID of an EBS CloudWatch event.

" + }, "XRayFaultPercent":{ "shape":"XRayFaultPercent", "documentation":"

The X-Ray request fault percentage for this node.

" @@ -1441,9 +1532,22 @@ "OpsItemSNSTopicArn":{ "type":"string", "max":300, - "min":20 + "min":20, + "pattern":"^arn:aws(-\\w+)*:[\\w\\d-]+:([\\w\\d-]*)?:[\\w\\d_-]*([:/].+)*$" + }, + "OsType":{ + "type":"string", + "enum":[ + "WINDOWS", + "LINUX" + ] + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".+" }, - "PaginationToken":{"type":"string"}, "Problem":{ "type":"structure", "members":{ @@ -1500,6 +1604,8 @@ "type":"list", "member":{"shape":"Problem"} }, + "RdsEventCategories":{"type":"string"}, + "RdsEventMessage":{"type":"string"}, "RelatedObservations":{ "type":"structure", "members":{ @@ -1515,7 +1621,8 @@ "ResourceARN":{ "type":"string", "max":1011, - "min":1 + "min":1, + "pattern":"^arn:aws(-\\w+)*:[\\w\\d-]+:([\\w\\d-]*)?:[\\w\\d_-]*([:/].+)*$" }, "ResourceGroupName":{ "type":"string", @@ -1543,7 +1650,13 @@ "documentation":"

The resource does not exist in the customer account.

", "exception":true }, - "ResourceType":{"type":"string"}, + "ResourceType":{ + "type":"string", + "max":50, + "min":1, + "pattern":"[0-9a-zA-Z:_]*" + }, + "S3EventName":{"type":"string"}, "SeverityLevel":{ "type":"string", "enum":[ @@ -1555,6 +1668,10 @@ "SourceARN":{"type":"string"}, "SourceType":{"type":"string"}, "StartTime":{"type":"timestamp"}, + "StatesArn":{"type":"string"}, + "StatesExecutionArn":{"type":"string"}, + "StatesInput":{"type":"string"}, + "StatesStatus":{"type":"string"}, "Status":{ "type":"string", "enum":[ @@ -1584,7 +1701,8 @@ "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagKeyList":{ "type":"list", @@ -1623,7 +1741,8 @@ "TagValue":{ "type":"string", "max":256, - "min":0 + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagsAlreadyExistException":{ "type":"structure", @@ -1636,11 +1755,18 @@ "Tier":{ "type":"string", "enum":[ + "CUSTOM", "DEFAULT", "DOT_NET_CORE", "DOT_NET_WORKER", + "DOT_NET_WEB_TIER", "DOT_NET_WEB", - "SQL_SERVER" + "SQL_SERVER", + "SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP", + "MYSQL", + "POSTGRESQL", + "JAVA_JMX", + "ORACLE" ], "max":50, "min":1 @@ -1762,11 +1888,11 @@ "documentation":"

The name of the resource group.

" }, "ComponentName":{ - "shape":"ComponentName", + "shape":"CustomComponentName", "documentation":"

The name of the component.

" }, "NewComponentName":{ - "shape":"NewComponentName", + "shape":"CustomComponentName", "documentation":"

The new name of the component.

" }, "ResourceList":{ @@ -1802,11 +1928,11 @@ }, "Pattern":{ "shape":"LogPatternRegex", - "documentation":"

The log pattern.

" + "documentation":"

The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.

" }, "Rank":{ "shape":"LogPatternRank", - "documentation":"

Rank of the log pattern.

" + "documentation":"

Rank of the log pattern. Must be a value between 1 and 1,000,000. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1 will be the first to get matched to a log line. A pattern of rank 1,000,000 will be last to get matched. When you configure custom log patterns from the console, a Low severity pattern translates to a 750,000 rank. A Medium severity pattern translates to a 500,000 rank. And a High severity pattern translates to a 250,000 rank. Rank values less than 1 or greater than 1,000,000 are reserved for AWS-provided patterns.

" } } }, @@ -1832,6 +1958,11 @@ "exception":true }, "Value":{"type":"double"}, + "WorkloadMetaData":{ + "type":"map", + "key":{"shape":"MetaDataKey"}, + "value":{"shape":"MetaDataValue"} + }, "XRayErrorPercent":{"type":"integer"}, "XRayFaultPercent":{"type":"integer"}, "XRayNodeName":{"type":"string"}, @@ -1840,5 +1971,5 @@ "XRayRequestCount":{"type":"integer"}, "XRayThrottlePercent":{"type":"integer"} }, - "documentation":"Amazon CloudWatch Application Insights for .NET and SQL Server

Amazon CloudWatch Application Insights for .NET and SQL Server is a service that helps you detect common problems with your .NET and SQL Server-based applications. It enables you to pinpoint the source of issues in your applications (built with technologies such as Microsoft IIS, .NET, and Microsoft SQL Server), by providing key insights into detected problems.

After you onboard your application, CloudWatch Application Insights for .NET and SQL Server identifies, recommends, and sets up metrics and logs. It continuously analyzes and correlates your metrics and logs for unusual behavior to surface actionable problems with your application. For example, if your application is slow and unresponsive and leading to HTTP 500 errors in your Application Load Balancer (ALB), Application Insights informs you that a memory pressure problem with your SQL Server database is occurring. It bases this analysis on impactful metrics and log errors.

" + "documentation":"Amazon CloudWatch Application Insights

Amazon CloudWatch Application Insights is a service that helps you detect common problems with your applications. It enables you to pinpoint the source of issues in your applications (built with technologies such as Microsoft IIS, .NET, and Microsoft SQL Server), by providing key insights into detected problems.

After you onboard your application, CloudWatch Application Insights identifies, recommends, and sets up metrics and logs. It continuously analyzes and correlates your metrics and logs for unusual behavior to surface actionable problems with your application. For example, if your application is slow and unresponsive and leading to HTTP 500 errors in your Application Load Balancer (ALB), Application Insights informs you that a memory pressure problem with your SQL Server database is occurring. It bases this analysis on impactful metrics and log errors.

" } diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index c93c8e13e1d4..901dd4716a43 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/appmesh/src/main/resources/codegen-resources/paginators-1.json b/services/appmesh/src/main/resources/codegen-resources/paginators-1.json index ac64b5684dab..03f248425e21 100644 --- a/services/appmesh/src/main/resources/codegen-resources/paginators-1.json +++ b/services/appmesh/src/main/resources/codegen-resources/paginators-1.json @@ -2,51 +2,51 @@ "pagination": { "ListGatewayRoutes": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "gatewayRoutes" }, "ListMeshes": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "meshes" }, "ListRoutes": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "routes" }, "ListTagsForResource": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "tags" }, "ListVirtualGateways": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "virtualGateways" }, "ListVirtualNodes": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "virtualNodes" }, "ListVirtualRouters": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "virtualRouters" }, "ListVirtualServices": { "input_token": "nextToken", - "limit_key": "limit", "output_token": "nextToken", + "limit_key": "limit", "result_key": "virtualServices" } } -} \ No newline at end of file +} diff --git a/services/appmesh/src/main/resources/codegen-resources/service-2.json b/services/appmesh/src/main/resources/codegen-resources/service-2.json index 677662813455..a0949bdaf39a 100644 --- a/services/appmesh/src/main/resources/codegen-resources/service-2.json +++ b/services/appmesh/src/main/resources/codegen-resources/service-2.json @@ -1,5791 +1,4960 @@ { - "version": "2.0", - "metadata": { - "apiVersion": "2019-01-25", - "endpointPrefix": "appmesh", - "jsonVersion": "1.1", - "protocol": "rest-json", - "serviceFullName": "AWS App Mesh", - "serviceId": "App Mesh", - "signatureVersion": "v4", - "signingName": "appmesh", - "uid": "appmesh-2019-01-25" + "version":"2.0", + "metadata":{ + "apiVersion":"2019-01-25", + "endpointPrefix":"appmesh", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS App Mesh", + "serviceId":"App Mesh", + "signatureVersion":"v4", + "signingName":"appmesh", + "uid":"appmesh-2019-01-25" }, - "documentation": "

AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and\n control microservices. App Mesh standardizes how your microservices communicate, giving you\n end-to-end visibility and helping to ensure high availability for your applications.

\n

App Mesh gives you consistent visibility and network traffic controls for every\n microservice in an application. You can use App Mesh with AWS Fargate, Amazon ECS, Amazon EKS,\n Kubernetes on AWS, and Amazon EC2.

\n \n

App Mesh supports microservice applications that use service discovery naming for their\n components. For more information about service discovery on Amazon ECS, see Service Discovery in the Amazon Elastic Container Service Developer Guide. Kubernetes\n kube-dns and coredns are supported. For more information,\n see DNS\n for Services and Pods in the Kubernetes documentation.

\n
", - "operations": { - "CreateGatewayRoute": { - "name": "CreateGatewayRoute", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", - "responseCode": 200 - }, - "input": { - "shape": "CreateGatewayRouteInput" - }, - "output": { - "shape": "CreateGatewayRouteOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Creates a gateway route.

\n

A gateway route is attached to a virtual gateway and routes traffic to an existing\n virtual service. If a route matches a request, it can distribute traffic to a target virtual service.

\n

For more information about gateway routes, see Gateway routes.

", - "idempotent": true - }, - "CreateMesh": { - "name": "CreateMesh", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes", - "responseCode": 200 - }, - "input": { - "shape": "CreateMeshInput" - }, - "output": { - "shape": "CreateMeshOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Creates a service mesh.

\n

A service mesh is a logical boundary for network traffic between services that are\n represented by resources within the mesh. After you create your service mesh, you can\n create virtual services, virtual nodes, virtual routers, and routes to distribute traffic\n between the applications in your mesh.

\n

For more information about service meshes, see Service meshes.

", - "idempotent": true - }, - "CreateRoute": { - "name": "CreateRoute", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes", - "responseCode": 200 - }, - "input": { - "shape": "CreateRouteInput" - }, - "output": { - "shape": "CreateRouteOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Creates a route that is associated with a virtual router.

\n

You can route several different protocols and define a retry policy for a route.\n Traffic can be routed to one or more virtual nodes.

\n

For more information about routes, see Routes.

", - "idempotent": true - }, - "CreateVirtualGateway": { - "name": "CreateVirtualGateway", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateways", - "responseCode": 200 - }, - "input": { - "shape": "CreateVirtualGatewayInput" - }, - "output": { - "shape": "CreateVirtualGatewayOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Creates a virtual gateway.

\n

A virtual gateway allows resources outside your mesh to communicate to resources that\n are inside your mesh. The virtual gateway represents an Envoy proxy running in an Amazon ECS\n task, in a Kubernetes service, or on an Amazon EC2 instance. Unlike a virtual node, which\n represents an Envoy running with an application, a virtual gateway represents Envoy deployed by itself.

\n

For more information about virtual gateways, see Virtual gateways.

", - "idempotent": true - }, - "CreateVirtualNode": { - "name": "CreateVirtualNode", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualNodes", - "responseCode": 200 - }, - "input": { - "shape": "CreateVirtualNodeInput" - }, - "output": { - "shape": "CreateVirtualNodeOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Creates a virtual node within a service mesh.

\n

A virtual node acts as a logical pointer to a particular task group, such as an Amazon ECS\n service or a Kubernetes deployment. When you create a virtual node, you can specify the\n service discovery information for your task group, and whether the proxy running in a task\n group will communicate with other proxies using Transport Layer Security (TLS).

\n

You define a listener for any inbound traffic that your virtual node\n expects. Any virtual service that your virtual node expects to communicate to is specified\n as a backend.

\n

The response metadata for your new virtual node contains the arn that is\n associated with the virtual node. Set this value (either the full ARN or the truncated\n resource name: for example, mesh/default/virtualNode/simpleapp) as the\n APPMESH_VIRTUAL_NODE_NAME environment variable for your task group's Envoy\n proxy container in your task definition or pod spec. This is then mapped to the\n node.id and node.cluster Envoy parameters.

\n \n

If you require your Envoy stats or tracing to use a different name, you can override\n the node.cluster value that is set by\n APPMESH_VIRTUAL_NODE_NAME with the\n APPMESH_VIRTUAL_NODE_CLUSTER environment variable.

\n
\n

For more information about virtual nodes, see Virtual nodes.

", - "idempotent": true - }, - "CreateVirtualRouter": { - "name": "CreateVirtualRouter", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouters", - "responseCode": 200 - }, - "input": { - "shape": "CreateVirtualRouterInput" - }, - "output": { - "shape": "CreateVirtualRouterOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Creates a virtual router within a service mesh.

\n

Specify a listener for any inbound traffic that your virtual router\n receives. Create a virtual router for each protocol and port that you need to route.\n Virtual routers handle traffic for one or more virtual services within your mesh. After you\n create your virtual router, create and associate routes for your virtual router that direct\n incoming requests to different virtual nodes.

\n

For more information about virtual routers, see Virtual routers.

", - "idempotent": true - }, - "CreateVirtualService": { - "name": "CreateVirtualService", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualServices", - "responseCode": 200 - }, - "input": { - "shape": "CreateVirtualServiceInput" - }, - "output": { - "shape": "CreateVirtualServiceOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Creates a virtual service within a service mesh.

\n

A virtual service is an abstraction of a real service that is provided by a virtual node\n directly or indirectly by means of a virtual router. Dependent services call your virtual\n service by its virtualServiceName, and those requests are routed to the\n virtual node or virtual router that is specified as the provider for the virtual\n service.

\n

For more information about virtual services, see Virtual services.

", - "idempotent": true - }, - "DeleteGatewayRoute": { - "name": "DeleteGatewayRoute", - "http": { - "method": "DELETE", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", - "responseCode": 200 - }, - "input": { - "shape": "DeleteGatewayRouteInput" - }, - "output": { - "shape": "DeleteGatewayRouteOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ResourceInUseException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Deletes an existing gateway route.

", - "idempotent": true - }, - "DeleteMesh": { - "name": "DeleteMesh", - "http": { - "method": "DELETE", - "requestUri": "/v20190125/meshes/{meshName}", - "responseCode": 200 - }, - "input": { - "shape": "DeleteMeshInput" - }, - "output": { - "shape": "DeleteMeshOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ResourceInUseException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Deletes an existing service mesh.

\n

You must delete all resources (virtual services, routes, virtual routers, and virtual\n nodes) in the service mesh before you can delete the mesh itself.

", - "idempotent": true - }, - "DeleteRoute": { - "name": "DeleteRoute", - "http": { - "method": "DELETE", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes/{routeName}", - "responseCode": 200 - }, - "input": { - "shape": "DeleteRouteInput" - }, - "output": { - "shape": "DeleteRouteOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ResourceInUseException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Deletes an existing route.

", - "idempotent": true - }, - "DeleteVirtualGateway": { - "name": "DeleteVirtualGateway", - "http": { - "method": "DELETE", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", - "responseCode": 200 - }, - "input": { - "shape": "DeleteVirtualGatewayInput" - }, - "output": { - "shape": "DeleteVirtualGatewayOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ResourceInUseException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Deletes an existing virtual gateway. You cannot delete a virtual gateway if any gateway\n routes are associated to it.

", - "idempotent": true - }, - "DeleteVirtualNode": { - "name": "DeleteVirtualNode", - "http": { - "method": "DELETE", - "requestUri": "/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", - "responseCode": 200 - }, - "input": { - "shape": "DeleteVirtualNodeInput" - }, - "output": { - "shape": "DeleteVirtualNodeOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ResourceInUseException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Deletes an existing virtual node.

\n

You must delete any virtual services that list a virtual node as a service provider\n before you can delete the virtual node itself.

", - "idempotent": true - }, - "DeleteVirtualRouter": { - "name": "DeleteVirtualRouter", - "http": { - "method": "DELETE", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouters/{virtualRouterName}", - "responseCode": 200 - }, - "input": { - "shape": "DeleteVirtualRouterInput" - }, - "output": { - "shape": "DeleteVirtualRouterOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ResourceInUseException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Deletes an existing virtual router.

\n

You must delete any routes associated with the virtual router before you can delete the\n router itself.

", - "idempotent": true - }, - "DeleteVirtualService": { - "name": "DeleteVirtualService", - "http": { - "method": "DELETE", - "requestUri": "/v20190125/meshes/{meshName}/virtualServices/{virtualServiceName}", - "responseCode": 200 - }, - "input": { - "shape": "DeleteVirtualServiceInput" - }, - "output": { - "shape": "DeleteVirtualServiceOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ResourceInUseException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Deletes an existing virtual service.

", - "idempotent": true - }, - "DescribeGatewayRoute": { - "name": "DescribeGatewayRoute", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", - "responseCode": 200 - }, - "input": { - "shape": "DescribeGatewayRouteInput" - }, - "output": { - "shape": "DescribeGatewayRouteOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Describes an existing gateway route.

" - }, - "DescribeMesh": { - "name": "DescribeMesh", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}", - "responseCode": 200 - }, - "input": { - "shape": "DescribeMeshInput" - }, - "output": { - "shape": "DescribeMeshOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Describes an existing service mesh.

" - }, - "DescribeRoute": { - "name": "DescribeRoute", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes/{routeName}", - "responseCode": 200 - }, - "input": { - "shape": "DescribeRouteInput" - }, - "output": { - "shape": "DescribeRouteOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Describes an existing route.

" - }, - "DescribeVirtualGateway": { - "name": "DescribeVirtualGateway", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", - "responseCode": 200 - }, - "input": { - "shape": "DescribeVirtualGatewayInput" - }, - "output": { - "shape": "DescribeVirtualGatewayOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Describes an existing virtual gateway.

" - }, - "DescribeVirtualNode": { - "name": "DescribeVirtualNode", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", - "responseCode": 200 - }, - "input": { - "shape": "DescribeVirtualNodeInput" - }, - "output": { - "shape": "DescribeVirtualNodeOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } + "operations":{ + "CreateGatewayRoute":{ + "name":"CreateGatewayRoute", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", + "responseCode":200 + }, + "input":{"shape":"CreateGatewayRouteInput"}, + "output":{"shape":"CreateGatewayRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a gateway route.

A gateway route is attached to a virtual gateway and routes traffic to an existing virtual service. If a route matches a request, it can distribute traffic to a target virtual service.

For more information about gateway routes, see Gateway routes.

", + "idempotent":true + }, + "CreateMesh":{ + "name":"CreateMesh", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes", + "responseCode":200 + }, + "input":{"shape":"CreateMeshInput"}, + "output":{"shape":"CreateMeshOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a service mesh.

A service mesh is a logical boundary for network traffic between services that are represented by resources within the mesh. After you create your service mesh, you can create virtual services, virtual nodes, virtual routers, and routes to distribute traffic between the applications in your mesh.

For more information about service meshes, see Service meshes.

", + "idempotent":true + }, + "CreateRoute":{ + "name":"CreateRoute", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes", + "responseCode":200 + }, + "input":{"shape":"CreateRouteInput"}, + "output":{"shape":"CreateRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a route that is associated with a virtual router.

You can route several different protocols and define a retry policy for a route. Traffic can be routed to one or more virtual nodes.

For more information about routes, see Routes.

", + "idempotent":true + }, + "CreateVirtualGateway":{ + "name":"CreateVirtualGateway", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateways", + "responseCode":200 + }, + "input":{"shape":"CreateVirtualGatewayInput"}, + "output":{"shape":"CreateVirtualGatewayOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a virtual gateway.

A virtual gateway allows resources outside your mesh to communicate with resources that are inside your mesh. The virtual gateway represents an Envoy proxy running in an Amazon ECS task, in a Kubernetes service, or on an Amazon EC2 instance. Unlike a virtual node, which represents an Envoy running with an application, a virtual gateway represents Envoy deployed by itself.

For more information about virtual gateways, see Virtual gateways.

", + "idempotent":true + }, + "CreateVirtualNode":{ + "name":"CreateVirtualNode", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualNodes", + "responseCode":200 + }, + "input":{"shape":"CreateVirtualNodeInput"}, + "output":{"shape":"CreateVirtualNodeOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a virtual node within a service mesh.

A virtual node acts as a logical pointer to a particular task group, such as an Amazon ECS service or a Kubernetes deployment. When you create a virtual node, you can specify the service discovery information for your task group, and whether the proxy running in a task group will communicate with other proxies using Transport Layer Security (TLS).

You define a listener for any inbound traffic that your virtual node expects. Any virtual service that your virtual node expects to communicate with is specified as a backend.

The response metadata for your new virtual node contains the arn that is associated with the virtual node. Set this value to the full ARN (for example, arn:aws:appmesh:us-west-2:123456789012:myMesh/default/virtualNode/myApp) as the APPMESH_RESOURCE_ARN environment variable for your task group's Envoy proxy container in your task definition or pod spec. This is then mapped to the node.id and node.cluster Envoy parameters.

By default, App Mesh uses the name of the resource you specified in APPMESH_RESOURCE_ARN when Envoy is referring to itself in metrics and traces. You can override this behavior by setting the APPMESH_RESOURCE_CLUSTER environment variable with your own name.

AWS Cloud Map is not available in the eu-south-1 Region.

For more information about virtual nodes, see Virtual nodes. You must be using version 1.15.0 or later of the Envoy image when setting these variables. For more information about App Mesh Envoy variables, see Envoy image in the AWS App Mesh User Guide.

", + "idempotent":true + }, + "CreateVirtualRouter":{ + "name":"CreateVirtualRouter", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouters", + "responseCode":200 + }, + "input":{"shape":"CreateVirtualRouterInput"}, + "output":{"shape":"CreateVirtualRouterOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a virtual router within a service mesh.

Specify a listener for any inbound traffic that your virtual router receives. Create a virtual router for each protocol and port that you need to route. Virtual routers handle traffic for one or more virtual services within your mesh. After you create your virtual router, create and associate routes for your virtual router that direct incoming requests to different virtual nodes.

For more information about virtual routers, see Virtual routers.

", + "idempotent":true + }, + "CreateVirtualService":{ + "name":"CreateVirtualService", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualServices", + "responseCode":200 + }, + "input":{"shape":"CreateVirtualServiceInput"}, + "output":{"shape":"CreateVirtualServiceOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a virtual service within a service mesh.

A virtual service is an abstraction of a real service that is provided by a virtual node directly or indirectly by means of a virtual router. Dependent services call your virtual service by its virtualServiceName, and those requests are routed to the virtual node or virtual router that is specified as the provider for the virtual service.

For more information about virtual services, see Virtual services.

", + "idempotent":true + }, + "DeleteGatewayRoute":{ + "name":"DeleteGatewayRoute", + "http":{ + "method":"DELETE", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + "responseCode":200 + }, + "input":{"shape":"DeleteGatewayRouteInput"}, + "output":{"shape":"DeleteGatewayRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes an existing gateway route.

", + "idempotent":true + }, + "DeleteMesh":{ + "name":"DeleteMesh", + "http":{ + "method":"DELETE", + "requestUri":"/v20190125/meshes/{meshName}", + "responseCode":200 + }, + "input":{"shape":"DeleteMeshInput"}, + "output":{"shape":"DeleteMeshOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes an existing service mesh.

You must delete all resources (virtual services, routes, virtual routers, and virtual nodes) in the service mesh before you can delete the mesh itself.

", + "idempotent":true + }, + "DeleteRoute":{ + "name":"DeleteRoute", + "http":{ + "method":"DELETE", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes/{routeName}", + "responseCode":200 + }, + "input":{"shape":"DeleteRouteInput"}, + "output":{"shape":"DeleteRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes an existing route.

", + "idempotent":true + }, + "DeleteVirtualGateway":{ + "name":"DeleteVirtualGateway", + "http":{ + "method":"DELETE", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + "responseCode":200 + }, + "input":{"shape":"DeleteVirtualGatewayInput"}, + "output":{"shape":"DeleteVirtualGatewayOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes an existing virtual gateway. You cannot delete a virtual gateway if any gateway routes are associated with it.

", + "idempotent":true + }, + "DeleteVirtualNode":{ + "name":"DeleteVirtualNode", + "http":{ + "method":"DELETE", + "requestUri":"/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", + "responseCode":200 + }, + "input":{"shape":"DeleteVirtualNodeInput"}, + "output":{"shape":"DeleteVirtualNodeOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes an existing virtual node.

You must delete any virtual services that list a virtual node as a service provider before you can delete the virtual node itself.

", + "idempotent":true + }, + "DeleteVirtualRouter":{ + "name":"DeleteVirtualRouter", + "http":{ + "method":"DELETE", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouters/{virtualRouterName}", + "responseCode":200 + }, + "input":{"shape":"DeleteVirtualRouterInput"}, + "output":{"shape":"DeleteVirtualRouterOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes an existing virtual router.

You must delete any routes associated with the virtual router before you can delete the router itself.

", + "idempotent":true + }, + "DeleteVirtualService":{ + "name":"DeleteVirtualService", + "http":{ + "method":"DELETE", + "requestUri":"/v20190125/meshes/{meshName}/virtualServices/{virtualServiceName}", + "responseCode":200 + }, + "input":{"shape":"DeleteVirtualServiceInput"}, + "output":{"shape":"DeleteVirtualServiceOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes an existing virtual service.

", + "idempotent":true + }, + "DescribeGatewayRoute":{ + "name":"DescribeGatewayRoute", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + "responseCode":200 + }, + "input":{"shape":"DescribeGatewayRouteInput"}, + "output":{"shape":"DescribeGatewayRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Describes an existing gateway route.

" + }, + "DescribeMesh":{ + "name":"DescribeMesh", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}", + "responseCode":200 + }, + "input":{"shape":"DescribeMeshInput"}, + "output":{"shape":"DescribeMeshOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Describes an existing service mesh.

" + }, + "DescribeRoute":{ + "name":"DescribeRoute", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes/{routeName}", + "responseCode":200 + }, + "input":{"shape":"DescribeRouteInput"}, + "output":{"shape":"DescribeRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Describes an existing route.

" + }, + "DescribeVirtualGateway":{ + "name":"DescribeVirtualGateway", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + "responseCode":200 + }, + "input":{"shape":"DescribeVirtualGatewayInput"}, + "output":{"shape":"DescribeVirtualGatewayOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Describes an existing virtual gateway.

" + }, + "DescribeVirtualNode":{ + "name":"DescribeVirtualNode", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", + "responseCode":200 + }, + "input":{"shape":"DescribeVirtualNodeInput"}, + "output":{"shape":"DescribeVirtualNodeOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Describes an existing virtual node.

" + }, + "DescribeVirtualRouter":{ + "name":"DescribeVirtualRouter", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouters/{virtualRouterName}", + "responseCode":200 + }, + "input":{"shape":"DescribeVirtualRouterInput"}, + "output":{"shape":"DescribeVirtualRouterOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Describes an existing virtual router.

" + }, + "DescribeVirtualService":{ + "name":"DescribeVirtualService", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualServices/{virtualServiceName}", + "responseCode":200 + }, + "input":{"shape":"DescribeVirtualServiceInput"}, + "output":{"shape":"DescribeVirtualServiceOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Describes an existing virtual service.

" + }, + "ListGatewayRoutes":{ + "name":"ListGatewayRoutes", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", + "responseCode":200 + }, + "input":{"shape":"ListGatewayRoutesInput"}, + "output":{"shape":"ListGatewayRoutesOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns a list of existing gateway routes that are associated with a virtual gateway.

" + }, + "ListMeshes":{ + "name":"ListMeshes", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes", + "responseCode":200 + }, + "input":{"shape":"ListMeshesInput"}, + "output":{"shape":"ListMeshesOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns a list of existing service meshes.

" + }, + "ListRoutes":{ + "name":"ListRoutes", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes", + "responseCode":200 + }, + "input":{"shape":"ListRoutesInput"}, + "output":{"shape":"ListRoutesOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns a list of existing routes in a service mesh.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/v20190125/tags", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

List the tags for an App Mesh resource.

" + }, + "ListVirtualGateways":{ + "name":"ListVirtualGateways", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateways", + "responseCode":200 + }, + "input":{"shape":"ListVirtualGatewaysInput"}, + "output":{"shape":"ListVirtualGatewaysOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns a list of existing virtual gateways in a service mesh.

" + }, + "ListVirtualNodes":{ + "name":"ListVirtualNodes", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualNodes", + "responseCode":200 + }, + "input":{"shape":"ListVirtualNodesInput"}, + "output":{"shape":"ListVirtualNodesOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns a list of existing virtual nodes.

" + }, + "ListVirtualRouters":{ + "name":"ListVirtualRouters", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouters", + "responseCode":200 + }, + "input":{"shape":"ListVirtualRoutersInput"}, + "output":{"shape":"ListVirtualRoutersOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns a list of existing virtual routers in a service mesh.

" + }, + "ListVirtualServices":{ + "name":"ListVirtualServices", + "http":{ + "method":"GET", + "requestUri":"/v20190125/meshes/{meshName}/virtualServices", + "responseCode":200 + }, + "input":{"shape":"ListVirtualServicesInput"}, + "output":{"shape":"ListVirtualServicesOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns a list of existing virtual services in a service mesh.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/tag", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyTagsException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags associated with that resource are also deleted.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/untag", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Deletes specified tags from a resource.

", + "idempotent":true + }, + "UpdateGatewayRoute":{ + "name":"UpdateGatewayRoute", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + "responseCode":200 + }, + "input":{"shape":"UpdateGatewayRouteInput"}, + "output":{"shape":"UpdateGatewayRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates an existing gateway route that is associated with a specified virtual gateway in a service mesh.

", + "idempotent":true + }, + "UpdateMesh":{ + "name":"UpdateMesh", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}", + "responseCode":200 + }, + "input":{"shape":"UpdateMeshInput"}, + "output":{"shape":"UpdateMeshOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Updates an existing service mesh.

", + "idempotent":true + }, + "UpdateRoute":{ + "name":"UpdateRoute", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes/{routeName}", + "responseCode":200 + }, + "input":{"shape":"UpdateRouteInput"}, + "output":{"shape":"UpdateRouteOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates an existing route for a specified service mesh and virtual router.

", + "idempotent":true + }, + "UpdateVirtualGateway":{ + "name":"UpdateVirtualGateway", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + "responseCode":200 + }, + "input":{"shape":"UpdateVirtualGatewayInput"}, + "output":{"shape":"UpdateVirtualGatewayOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates an existing virtual gateway in a specified service mesh.

", + "idempotent":true + }, + "UpdateVirtualNode":{ + "name":"UpdateVirtualNode", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", + "responseCode":200 + }, + "input":{"shape":"UpdateVirtualNodeInput"}, + "output":{"shape":"UpdateVirtualNodeOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates an existing virtual node in a specified service mesh.

", + "idempotent":true + }, + "UpdateVirtualRouter":{ + "name":"UpdateVirtualRouter", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualRouters/{virtualRouterName}", + "responseCode":200 + }, + "input":{"shape":"UpdateVirtualRouterInput"}, + "output":{"shape":"UpdateVirtualRouterOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates an existing virtual router in a specified service mesh.

", + "idempotent":true + }, + "UpdateVirtualService":{ + "name":"UpdateVirtualService", + "http":{ + "method":"PUT", + "requestUri":"/v20190125/meshes/{meshName}/virtualServices/{virtualServiceName}", + "responseCode":200 + }, + "input":{"shape":"UpdateVirtualServiceInput"}, + "output":{"shape":"UpdateVirtualServiceOutput"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServerErrorException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates an existing virtual service in a specified service mesh.

", + "idempotent":true + } + }, + "shapes":{ + "AccessLog":{ + "type":"structure", + "members":{ + "file":{ + "shape":"FileAccessLog", + "documentation":"

The file object to send virtual node access logs to.

" + } + }, + "documentation":"

An object that represents the access logging information for a virtual node.

", + "union":true + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12 + }, + "Arn":{"type":"string"}, + "AwsCloudMapInstanceAttribute":{ + "type":"structure", + "required":[ + "key", + "value" ], - "documentation": "

Describes an existing virtual node.

" - }, - "DescribeVirtualRouter": { - "name": "DescribeVirtualRouter", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouters/{virtualRouterName}", - "responseCode": 200 - }, - "input": { - "shape": "DescribeVirtualRouterInput" - }, - "output": { - "shape": "DescribeVirtualRouterOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" + "members":{ + "key":{ + "shape":"AwsCloudMapInstanceAttributeKey", + "documentation":"

The name of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map service instance that contains the specified key and value is returned.

" }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" + "value":{ + "shape":"AwsCloudMapInstanceAttributeValue", + "documentation":"

The value of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map service instance that contains the specified key and value is returned.

" } - ], - "documentation": "

Describes an existing virtual router.

" - }, - "DescribeVirtualService": { - "name": "DescribeVirtualService", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualServices/{virtualServiceName}", - "responseCode": 200 }, - "input": { - "shape": "DescribeVirtualServiceInput" - }, - "output": { - "shape": "DescribeVirtualServiceOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Describes an existing virtual service.

" + "documentation":"

An object that represents the AWS Cloud Map attribute information for your virtual node.

AWS Cloud Map is not available in the eu-south-1 Region.

" }, - "ListGatewayRoutes": { - "name": "ListGatewayRoutes", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", - "responseCode": 200 - }, - "input": { - "shape": "ListGatewayRoutesInput" - }, - "output": { - "shape": "ListGatewayRoutesOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Returns a list of existing gateway routes that are associated to a virtual\n gateway.

" + "AwsCloudMapInstanceAttributeKey":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9!-~]+$" }, - "ListMeshes": { - "name": "ListMeshes", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes", - "responseCode": 200 - }, - "input": { - "shape": "ListMeshesInput" - }, - "output": { - "shape": "ListMeshesOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Returns a list of existing service meshes.

" + "AwsCloudMapInstanceAttributeValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([a-zA-Z0-9!-~][ a-zA-Z0-9!-~]*){0,1}[a-zA-Z0-9!-~]{0,1}$" }, - "ListRoutes": { - "name": "ListRoutes", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes", - "responseCode": 200 - }, - "input": { - "shape": "ListRoutesInput" - }, - "output": { - "shape": "ListRoutesOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

Returns a list of existing routes in a service mesh.

" + "AwsCloudMapInstanceAttributes":{ + "type":"list", + "member":{"shape":"AwsCloudMapInstanceAttribute"} }, - "ListTagsForResource": { - "name": "ListTagsForResource", - "http": { - "method": "GET", - "requestUri": "/v20190125/tags", - "responseCode": 200 - }, - "input": { - "shape": "ListTagsForResourceInput" - }, - "output": { - "shape": "ListTagsForResourceOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } - ], - "documentation": "

List the tags for an App Mesh resource.

" + "AwsCloudMapName":{ + "type":"string", + "max":1024, + "min":1 }, - "ListVirtualGateways": { - "name": "ListVirtualGateways", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateways", - "responseCode": 200 - }, - "input": { - "shape": "ListVirtualGatewaysInput" - }, - "output": { - "shape": "ListVirtualGatewaysOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - } + "AwsCloudMapServiceDiscovery":{ + "type":"structure", + "required":[ + "namespaceName", + "serviceName" ], - "documentation": "

Returns a list of existing virtual gateways in a service mesh.

" - }, - "ListVirtualNodes": { - "name": "ListVirtualNodes", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualNodes", - "responseCode": 200 - }, - "input": { - "shape": "ListVirtualNodesInput" - }, - "output": { - "shape": "ListVirtualNodesOutput" - }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" + "members":{ + "attributes":{ + "shape":"AwsCloudMapInstanceAttributes", + "documentation":"

A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned.

" }, - { - "shape": "ServiceUnavailableException" + "namespaceName":{ + "shape":"AwsCloudMapName", + "documentation":"

The name of the AWS Cloud Map namespace to use.

" }, - { - "shape": "TooManyRequestsException" + "serviceName":{ + "shape":"AwsCloudMapName", + "documentation":"

The name of the AWS Cloud Map service to use.

" } - ], - "documentation": "

Returns a list of existing virtual nodes.

" - }, - "ListVirtualRouters": { - "name": "ListVirtualRouters", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouters", - "responseCode": 200 }, - "input": { - "shape": "ListVirtualRoutersInput" - }, - "output": { - "shape": "ListVirtualRoutersOutput" + "documentation":"

An object that represents the AWS Cloud Map service discovery information for your virtual node.

AWS Cloud Map is not available in the eu-south-1 Region.

" + }, + "Backend":{ + "type":"structure", + "members":{ + "virtualService":{ + "shape":"VirtualServiceBackend", + "documentation":"

Specifies a virtual service to use as a backend.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" + "documentation":"

An object that represents the backends that a virtual node is expected to send outbound traffic to.

", + "union":true + }, + "BackendDefaults":{ + "type":"structure", + "members":{ + "clientPolicy":{ + "shape":"ClientPolicy", + "documentation":"

A reference to an object that represents a client policy.

" } - ], - "documentation": "

Returns a list of existing virtual routers in a service mesh.

" + }, + "documentation":"

An object that represents the default properties for a backend.

" + }, + "Backends":{ + "type":"list", + "member":{"shape":"Backend"} }, - "ListVirtualServices": { - "name": "ListVirtualServices", - "http": { - "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualServices", - "responseCode": 200 + "BadRequestException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} }, - "input": { - "shape": "ListVirtualServicesInput" + "documentation":"

The request syntax was malformed. Check your request syntax and try again.

", + "error":{ + "httpStatusCode":400, + "senderFault":true }, - "output": { - "shape": "ListVirtualServicesOutput" + "exception":true + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CertificateAuthorityArns":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":3, + "min":1 + }, + "ClientPolicy":{ + "type":"structure", + "members":{ + "tls":{ + "shape":"ClientPolicyTls", + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) client policy.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" + "documentation":"

An object that represents a client policy.

" + }, + "ClientPolicyTls":{ + "type":"structure", + "required":["validation"], + "members":{ + "enforce":{ + "shape":"Boolean", + "documentation":"

Whether the policy is enforced. If a value isn't specified, the default is True.

", + "box":true }, - { - "shape": "ServiceUnavailableException" + "ports":{ + "shape":"PortSet", + "documentation":"

One or more ports that the policy is enforced for.

" }, - { - "shape": "TooManyRequestsException" + "validation":{ + "shape":"TlsValidationContext", + "documentation":"

A reference to an object that represents a TLS validation context.

" } - ], - "documentation": "

Returns a list of existing virtual services in a service mesh.

" - }, - "TagResource": { - "name": "TagResource", - "http": { - "method": "PUT", - "requestUri": "/v20190125/tag", - "responseCode": 200 }, - "input": { - "shape": "TagResourceInput" + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) client policy.

" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} }, - "output": { - "shape": "TagResourceOutput" + "documentation":"

The request contains a client token that was used for a previous update resource call with different specifications. Try the request again with a new client token.

", + "error":{ + "httpStatusCode":409, + "senderFault":true }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" - }, - { - "shape": "TooManyTagsException" - } + "exception":true + }, + "CreateGatewayRouteInput":{ + "type":"structure", + "required":[ + "gatewayRouteName", + "meshName", + "spec", + "virtualGatewayName" ], - "documentation": "

Associates the specified tags to a resource with the specified resourceArn.\n If existing tags on a resource aren't specified in the request parameters, they aren't\n changed. When a resource is deleted, the tags associated with that resource are also\n deleted.

", - "idempotent": true + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true + }, + "gatewayRouteName":{ + "shape":"ResourceName", + "documentation":"

The name to use for the gateway route.

" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to create the gateway route in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "spec":{ + "shape":"GatewayRouteSpec", + "documentation":"

The gateway route specification to apply.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you can apply to the gateway route to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway to associate the gateway route with. If the virtual gateway is in a shared mesh, then you must be the owner of the virtual gateway resource.

", + "location":"uri", + "locationName":"virtualGatewayName" + } + } }, - "UntagResource": { - "name": "UntagResource", - "http": { - "method": "PUT", - "requestUri": "/v20190125/untag", - "responseCode": 200 - }, - "input": { - "shape": "UntagResourceInput" - }, - "output": { - "shape": "UntagResourceOutput" + "CreateGatewayRouteOutput":{ + "type":"structure", + "required":["gatewayRoute"], + "members":{ + "gatewayRoute":{ + "shape":"GatewayRouteData", + "documentation":"

The full description of your gateway route following the create call.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" + "payload":"gatewayRoute" + }, + "CreateMeshInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - { - "shape": "NotFoundException" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name to use for the service mesh.

" }, - { - "shape": "ServiceUnavailableException" + "spec":{ + "shape":"MeshSpec", + "documentation":"

The service mesh specification to apply.

" }, - { - "shape": "TooManyRequestsException" + "tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you can apply to the service mesh to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } - ], - "documentation": "

Deletes specified tags from a resource.

", - "idempotent": true - }, - "UpdateGatewayRoute": { - "name": "UpdateGatewayRoute", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", - "responseCode": 200 - }, - "input": { - "shape": "UpdateGatewayRouteInput" }, - "output": { - "shape": "UpdateGatewayRouteOutput" + "documentation":"" + }, + "CreateMeshOutput":{ + "type":"structure", + "required":["mesh"], + "members":{ + "mesh":{ + "shape":"MeshData", + "documentation":"

The full description of your service mesh following the create call.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" + "documentation":"", + "payload":"mesh" + }, + "CreateRouteInput":{ + "type":"structure", + "required":[ + "meshName", + "routeName", + "spec", + "virtualRouterName" + ], + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - { - "shape": "ForbiddenException" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to create the route in.

", + "location":"uri", + "locationName":"meshName" }, - { - "shape": "InternalServerErrorException" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - { - "shape": "LimitExceededException" + "routeName":{ + "shape":"ResourceName", + "documentation":"

The name to use for the route.

" }, - { - "shape": "NotFoundException" + "spec":{ + "shape":"RouteSpec", + "documentation":"

The route specification to apply.

" }, - { - "shape": "ServiceUnavailableException" + "tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you can apply to the route to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" }, - { - "shape": "TooManyRequestsException" + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router in which to create the route. If the virtual router is in a shared mesh, then you must be the owner of the virtual router resource.

", + "location":"uri", + "locationName":"virtualRouterName" } - ], - "documentation": "

Updates an existing gateway route that is associated to a specified virtual gateway in a\n service mesh.

", - "idempotent": true - }, - "UpdateMesh": { - "name": "UpdateMesh", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}", - "responseCode": 200 - }, - "input": { - "shape": "UpdateMeshInput" }, - "output": { - "shape": "UpdateMeshOutput" + "documentation":"" + }, + "CreateRouteOutput":{ + "type":"structure", + "required":["route"], + "members":{ + "route":{ + "shape":"RouteData", + "documentation":"

The full description of your route following the create call.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" + "documentation":"", + "payload":"route" + }, + "CreateVirtualGatewayInput":{ + "type":"structure", + "required":[ + "meshName", + "spec", + "virtualGatewayName" + ], + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - { - "shape": "ForbiddenException" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to create the virtual gateway in.

", + "location":"uri", + "locationName":"meshName" }, - { - "shape": "InternalServerErrorException" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - { - "shape": "NotFoundException" + "spec":{ + "shape":"VirtualGatewaySpec", + "documentation":"

The virtual gateway specification to apply.

" }, - { - "shape": "ServiceUnavailableException" + "tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you can apply to the virtual gateway to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" }, - { - "shape": "TooManyRequestsException" + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name to use for the virtual gateway.

" } - ], - "documentation": "

Updates an existing service mesh.

", - "idempotent": true + } }, - "UpdateRoute": { - "name": "UpdateRoute", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouter/{virtualRouterName}/routes/{routeName}", - "responseCode": 200 - }, - "input": { - "shape": "UpdateRouteInput" - }, - "output": { - "shape": "UpdateRouteOutput" + "CreateVirtualGatewayOutput":{ + "type":"structure", + "required":["virtualGateway"], + "members":{ + "virtualGateway":{ + "shape":"VirtualGatewayData", + "documentation":"

The full description of your virtual gateway following the create call.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" + "payload":"virtualGateway" + }, + "CreateVirtualNodeInput":{ + "type":"structure", + "required":[ + "meshName", + "spec", + "virtualNodeName" + ], + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - { - "shape": "InternalServerErrorException" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to create the virtual node in.

", + "location":"uri", + "locationName":"meshName" }, - { - "shape": "LimitExceededException" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - { - "shape": "NotFoundException" + "spec":{ + "shape":"VirtualNodeSpec", + "documentation":"

The virtual node specification to apply.

" }, - { - "shape": "ServiceUnavailableException" + "tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you can apply to the virtual node to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" }, - { - "shape": "TooManyRequestsException" + "virtualNodeName":{ + "shape":"ResourceName", + "documentation":"

The name to use for the virtual node.

" } - ], - "documentation": "

Updates an existing route for a specified service mesh and virtual router.

", - "idempotent": true - }, - "UpdateVirtualGateway": { - "name": "UpdateVirtualGateway", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", - "responseCode": 200 }, - "input": { - "shape": "UpdateVirtualGatewayInput" - }, - "output": { - "shape": "UpdateVirtualGatewayOutput" + "documentation":"" + }, + "CreateVirtualNodeOutput":{ + "type":"structure", + "required":["virtualNode"], + "members":{ + "virtualNode":{ + "shape":"VirtualNodeData", + "documentation":"

The full description of your virtual node following the create call.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" + "documentation":"", + "payload":"virtualNode" + }, + "CreateVirtualRouterInput":{ + "type":"structure", + "required":[ + "meshName", + "spec", + "virtualRouterName" + ], + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - { - "shape": "InternalServerErrorException" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to create the virtual router in.

", + "location":"uri", + "locationName":"meshName" }, - { - "shape": "LimitExceededException" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - { - "shape": "NotFoundException" + "spec":{ + "shape":"VirtualRouterSpec", + "documentation":"

The virtual router specification to apply.

" }, - { - "shape": "ServiceUnavailableException" + "tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you can apply to the virtual router to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" }, - { - "shape": "TooManyRequestsException" + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name to use for the virtual router.

" } - ], - "documentation": "

Updates an existing virtual gateway in a specified service mesh.

", - "idempotent": true - }, - "UpdateVirtualNode": { - "name": "UpdateVirtualNode", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", - "responseCode": 200 }, - "input": { - "shape": "UpdateVirtualNodeInput" - }, - "output": { - "shape": "UpdateVirtualNodeOutput" + "documentation":"" + }, + "CreateVirtualRouterOutput":{ + "type":"structure", + "required":["virtualRouter"], + "members":{ + "virtualRouter":{ + "shape":"VirtualRouterData", + "documentation":"

The full description of your virtual router following the create call.

" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" + "documentation":"", + "payload":"virtualRouter" + }, + "CreateVirtualServiceInput":{ + "type":"structure", + "required":[ + "meshName", + "spec", + "virtualServiceName" + ], + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - { - "shape": "InternalServerErrorException" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to create the virtual service in.

", + "location":"uri", + "locationName":"meshName" }, - { - "shape": "LimitExceededException" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - { - "shape": "NotFoundException" + "spec":{ + "shape":"VirtualServiceSpec", + "documentation":"

The virtual service specification to apply.

" }, - { - "shape": "ServiceUnavailableException" + "tags":{ + "shape":"TagList", + "documentation":"

Optional metadata that you can apply to the virtual service to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" }, - { - "shape": "TooManyRequestsException" + "virtualServiceName":{ + "shape":"ServiceName", + "documentation":"

The name to use for the virtual service.

" } - ], - "documentation": "

Updates an existing virtual node in a specified service mesh.

", - "idempotent": true - }, - "UpdateVirtualRouter": { - "name": "UpdateVirtualRouter", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouters/{virtualRouterName}", - "responseCode": 200 - }, - "input": { - "shape": "UpdateVirtualRouterInput" - }, - "output": { - "shape": "UpdateVirtualRouterOutput" }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" + "documentation":"" + }, + "CreateVirtualServiceOutput":{ + "type":"structure", + "required":["virtualService"], + "members":{ + "virtualService":{ + "shape":"VirtualServiceData", + "documentation":"

The full description of your virtual service following the create call.

" } + }, + "documentation":"", + "payload":"virtualService" + }, + "DeleteGatewayRouteInput":{ + "type":"structure", + "required":[ + "gatewayRouteName", + "meshName", + "virtualGatewayName" ], - "documentation": "

Updates an existing virtual router in a specified service mesh.

", - "idempotent": true + "members":{ + "gatewayRouteName":{ + "shape":"ResourceName", + "documentation":"

The name of the gateway route to delete.

", + "location":"uri", + "locationName":"gatewayRouteName" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to delete the gateway route from.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway to delete the route from.

", + "location":"uri", + "locationName":"virtualGatewayName" + } + } }, - "UpdateVirtualService": { - "name": "UpdateVirtualService", - "http": { - "method": "PUT", - "requestUri": "/v20190125/meshes/{meshName}/virtualServices/{virtualServiceName}", - "responseCode": 200 - }, - "input": { - "shape": "UpdateVirtualServiceInput" + "DeleteGatewayRouteOutput":{ + "type":"structure", + "required":["gatewayRoute"], + "members":{ + "gatewayRoute":{ + "shape":"GatewayRouteData", + "documentation":"

The gateway route that was deleted.

" + } }, - "output": { - "shape": "UpdateVirtualServiceOutput" + "payload":"gatewayRoute" + }, + "DeleteMeshInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to delete.

", + "location":"uri", + "locationName":"meshName" + } }, - "errors": [ - { - "shape": "BadRequestException" - }, - { - "shape": "ConflictException" - }, - { - "shape": "ForbiddenException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "LimitExceededException" - }, - { - "shape": "NotFoundException" - }, - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "TooManyRequestsException" + "documentation":"" + }, + "DeleteMeshOutput":{ + "type":"structure", + "required":["mesh"], + "members":{ + "mesh":{ + "shape":"MeshData", + "documentation":"

The service mesh that was deleted.

" } + }, + "documentation":"", + "payload":"mesh" + }, + "DeleteRouteInput":{ + "type":"structure", + "required":[ + "meshName", + "routeName", + "virtualRouterName" ], - "documentation": "

Updates an existing virtual service in a specified service mesh.

", - "idempotent": true - } - }, - "shapes": { - "VirtualRouterListener": { - "type": "structure", - "required": [ - "portMapping" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to delete the route in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "routeName":{ + "shape":"ResourceName", + "documentation":"

The name of the route to delete.

", + "location":"uri", + "locationName":"routeName" + }, + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router to delete the route in.

", + "location":"uri", + "locationName":"virtualRouterName" + } + }, + "documentation":"" + }, + "DeleteRouteOutput":{ + "type":"structure", + "required":["route"], + "members":{ + "route":{ + "shape":"RouteData", + "documentation":"

The route that was deleted.

" + } + }, + "documentation":"", + "payload":"route" + }, + "DeleteVirtualGatewayInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualGatewayName" ], - "members": { - "portMapping": { - "shape": "PortMapping" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to delete the virtual gateway from.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway to delete.

", + "location":"uri", + "locationName":"virtualGatewayName" } - }, - "documentation": "

An object that represents a virtual router listener.

" - }, - "VirtualRouterStatusCode": { - "type": "string", - "enum": [ - "ACTIVE", - "DELETED", - "INACTIVE" - ] + } }, - "TagKeyList": { - "type": "list", - "member": { - "shape": "TagKey" + "DeleteVirtualGatewayOutput":{ + "type":"structure", + "required":["virtualGateway"], + "members":{ + "virtualGateway":{ + "shape":"VirtualGatewayData", + "documentation":"

The virtual gateway that was deleted.

" + } }, - "min": 0, - "max": 50 + "payload":"virtualGateway" }, - "GrpcRetryPolicy": { - "type": "structure", - "required": [ - "maxRetries", - "perRetryTimeout" + "DeleteVirtualNodeInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualNodeName" ], - "members": { - "grpcRetryEvents": { - "shape": "GrpcRetryPolicyEvents", - "documentation": "

Specify at least one of the valid values.

" - }, - "httpRetryEvents": { - "shape": "HttpRetryPolicyEvents", - "documentation": "

Specify at least one of the following values.

  • server-error – HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511

  • gateway-error – HTTP status codes 502, 503, and 504

  • client-error – HTTP status code 409

  • stream-error – Retry on refused stream

" - }, - "maxRetries": { - "shape": "MaxRetries", - "documentation": "

The maximum number of retry attempts.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to delete the virtual node in.

", + "location":"uri", + "locationName":"meshName" }, - "perRetryTimeout": { - "shape": "Duration", - "documentation": "

An object that represents a duration of time.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - "tcpRetryEvents": { - "shape": "TcpRetryPolicyEvents", - "documentation": "

Specify a valid value.

" + "virtualNodeName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual node to delete.

", + "location":"uri", + "locationName":"virtualNodeName" } }, - "documentation": "

An object that represents a retry policy. Specify at least one value for at least one of the types of RetryEvents, a value for maxRetries, and a value for perRetryTimeout.

" + "documentation":"" }, - "CreateVirtualNodeOutput": { - "type": "structure", - "required": [ - "virtualNode" - ], - "members": { - "virtualNode": { - "shape": "VirtualNodeData", - "documentation": "

The full description of your virtual node following the create call.

" + "DeleteVirtualNodeOutput":{ + "type":"structure", + "required":["virtualNode"], + "members":{ + "virtualNode":{ + "shape":"VirtualNodeData", + "documentation":"

The virtual node that was deleted.

" } }, - "documentation": "", - "payload": "virtualNode" + "documentation":"", + "payload":"virtualNode" }, - "Logging": { - "type": "structure", - "members": { - "accessLog": { - "shape": "AccessLog", - "documentation": "

The access log configuration for a virtual node.

" + "DeleteVirtualRouterInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualRouterName" + ], + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to delete the virtual router in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router to delete.

", + "location":"uri", + "locationName":"virtualRouterName" } }, - "documentation": "

An object that represents the logging information for a virtual node.

" + "documentation":"" }, - "Long": { - "type": "long", - "box": true - }, - "UpdateVirtualRouterOutput": { - "type": "structure", - "required": [ - "virtualRouter" - ], - "members": { - "virtualRouter": { - "shape": "VirtualRouterData", - "documentation": "

A full description of the virtual router that was updated.

" + "DeleteVirtualRouterOutput":{ + "type":"structure", + "required":["virtualRouter"], + "members":{ + "virtualRouter":{ + "shape":"VirtualRouterData", + "documentation":"

The virtual router that was deleted.

" } }, - "documentation": "", - "payload": "virtualRouter" + "documentation":"", + "payload":"virtualRouter" }, - "ListVirtualRoutersOutput": { - "type": "structure", - "required": [ - "virtualRouters" + "DeleteVirtualServiceInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualServiceName" ], - "members": { - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListVirtualRouters request. When the results of a ListVirtualRouters request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to delete the virtual service in.

", + "location":"uri", + "locationName":"meshName" }, - "virtualRouters": { - "shape": "VirtualRouterList", - "documentation": "

The list of existing virtual routers for the specified service mesh.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "virtualServiceName":{ + "shape":"ServiceName", + "documentation":"

The name of the virtual service to delete.

", + "location":"uri", + "locationName":"virtualServiceName" } }, - "documentation": "" + "documentation":"" }, - "CreateVirtualGatewayInput": { - "type": "structure", - "required": [ - "meshName", - "spec", - "virtualGatewayName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to create the virtual gateway in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualGatewaySpec", - "documentation": "

The virtual gateway specification to apply.

" - }, - "tags": { - "shape": "TagList", - "documentation": "

Optional metadata that you can apply to the virtual gateway to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name to use for the virtual gateway.

" + "DeleteVirtualServiceOutput":{ + "type":"structure", + "required":["virtualService"], + "members":{ + "virtualService":{ + "shape":"VirtualServiceData", + "documentation":"

The virtual service that was deleted.

" } - } + }, + "documentation":"", + "payload":"virtualService" }, - "UpdateVirtualGatewayInput": { - "type": "structure", - "required": [ + "DescribeGatewayRouteInput":{ + "type":"structure", + "required":[ + "gatewayRouteName", "meshName", - "spec", "virtualGatewayName" ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual gateway resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualGatewaySpec", - "documentation": "

The new virtual gateway specification to apply. This overwrites the existing data.

" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway to update.

", - "location": "uri", - "locationName": "virtualGatewayName" + "members":{ + "gatewayRouteName":{ + "shape":"ResourceName", + "documentation":"

The name of the gateway route to describe.

", + "location":"uri", + "locationName":"gatewayRouteName" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the gateway route resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway that the gateway route is associated with.

", + "location":"uri", + "locationName":"virtualGatewayName" } } }, - "ResourceMetadata": { - "type": "structure", - "required": [ - "arn", - "createdAt", - "lastUpdatedAt", - "meshOwner", - "resourceOwner", - "uid", - "version" - ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) for the resource.

" - }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" - }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" - }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" - }, - "uid": { - "shape": "String", - "documentation": "

The unique identifier for the resource.

" - }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + "DescribeGatewayRouteOutput":{ + "type":"structure", + "required":["gatewayRoute"], + "members":{ + "gatewayRoute":{ + "shape":"GatewayRouteData", + "documentation":"

The full description of your gateway route.

" } }, - "documentation": "

An object that represents metadata for a resource.

" + "payload":"gatewayRoute" }, - "ResourceInUseException": { - "type": "structure", - "members": { - "message": { - "shape": "String" + "DescribeMeshInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to describe.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" } }, - "documentation": "

You can't delete the specified resource because it's in use or required by another resource.

", - "exception": true, - "error": { - "code": "ResourceInUseException", - "httpStatusCode": 409, - "senderFault": true - } + "documentation":"" }, - "UpdateVirtualNodeOutput": { - "type": "structure", - "required": [ - "virtualNode" - ], - "members": { - "virtualNode": { - "shape": "VirtualNodeData", - "documentation": "

A full description of the virtual node that was updated.

" + "DescribeMeshOutput":{ + "type":"structure", + "required":["mesh"], + "members":{ + "mesh":{ + "shape":"MeshData", + "documentation":"

The full description of your service mesh.

" } }, - "documentation": "", - "payload": "virtualNode" + "documentation":"", + "payload":"mesh" }, - "ListRoutesOutput": { - "type": "structure", - "required": [ - "routes" + "DescribeRouteInput":{ + "type":"structure", + "required":[ + "meshName", + "routeName", + "virtualRouterName" ], - "members": { - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListRoutes request. When the results of a ListRoutes request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" - }, - "routes": { - "shape": "RouteList", - "documentation": "

The list of existing routes for the specified service mesh and virtual router.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the route resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "routeName":{ + "shape":"ResourceName", + "documentation":"

The name of the route to describe.

", + "location":"uri", + "locationName":"routeName" + }, + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router that the route is associated with.

", + "location":"uri", + "locationName":"virtualRouterName" + } + }, + "documentation":"" + }, + "DescribeRouteOutput":{ + "type":"structure", + "required":["route"], + "members":{ + "route":{ + "shape":"RouteData", + "documentation":"

The full description of your route.

" + } + }, + "documentation":"", + "payload":"route" + }, + "DescribeVirtualGatewayInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualGatewayName" + ], + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual gateway resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway to describe.

", + "location":"uri", + "locationName":"virtualGatewayName" + } + } + }, + "DescribeVirtualGatewayOutput":{ + "type":"structure", + "required":["virtualGateway"], + "members":{ + "virtualGateway":{ + "shape":"VirtualGatewayData", + "documentation":"

The full description of your virtual gateway.

" } }, - "documentation": "" + "payload":"virtualGateway" }, - "VirtualServiceBackend": { - "type": "structure", - "required": [ - "virtualServiceName" + "DescribeVirtualNodeInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualNodeName" ], - "members": { - "clientPolicy": { - "shape": "ClientPolicy", - "documentation": "

A reference to an object that represents the client policy for a backend.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual node resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - "virtualServiceName": { - "shape": "ServiceName", - "documentation": "

The name of the virtual service that is acting as a virtual node backend.

" + "virtualNodeName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual node to describe.

", + "location":"uri", + "locationName":"virtualNodeName" } }, - "documentation": "

An object that represents a virtual service backend for a virtual node.

" + "documentation":"" }, - "BadRequestException": { - "type": "structure", - "members": { - "message": { - "shape": "String" + "DescribeVirtualNodeOutput":{ + "type":"structure", + "required":["virtualNode"], + "members":{ + "virtualNode":{ + "shape":"VirtualNodeData", + "documentation":"

The full description of your virtual node.

" } }, - "documentation": "

The request syntax was malformed. Check your request syntax and try again.

", - "exception": true, - "error": { - "code": "BadRequestException", - "httpStatusCode": 400, - "senderFault": true - } + "documentation":"", + "payload":"virtualNode" }, - "HttpGatewayRouteMatch": { - "type": "structure", - "required": [ - "prefix" + "DescribeVirtualRouterInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualRouterName" ], - "members": { - "prefix": { - "shape": "String", - "documentation": "

Specifies the path to match requests with. This parameter must always start with /, which by itself matches all requests to the virtual service name. You can also match for path-based routing of requests. For example, if your virtual service name is my-service.local and you want the route to match requests to my-service.local/metrics, your prefix should be /metrics.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual router resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router to describe.

", + "location":"uri", + "locationName":"virtualRouterName" } }, - "documentation": "

An object that represents the criteria for determining a request match.

" + "documentation":"" }, - "GrpcRouteMetadataList": { - "type": "list", - "member": { - "shape": "GrpcRouteMetadata" + "DescribeVirtualRouterOutput":{ + "type":"structure", + "required":["virtualRouter"], + "members":{ + "virtualRouter":{ + "shape":"VirtualRouterData", + "documentation":"

The full description of your virtual router.

" + } }, - "min": 1, - "max": 10 + "documentation":"", + "payload":"virtualRouter" }, - "ListenerTlsMode": { - "type": "string", - "enum": [ - "DISABLED", - "PERMISSIVE", - "STRICT" - ] - }, - "HealthCheckPolicy": { - "type": "structure", - "required": [ - "healthyThreshold", - "intervalMillis", - "protocol", - "timeoutMillis", - "unhealthyThreshold" + "DescribeVirtualServiceInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualServiceName" ], - "members": { - "healthyThreshold": { - "shape": "HealthCheckThreshold", - "documentation": "

The number of consecutive successful health checks that must occur before declaring the listener healthy.

" - }, - "intervalMillis": { - "shape": "HealthCheckIntervalMillis", - "documentation": "

The time period in milliseconds between each health check execution.

" - }, - "path": { - "shape": "String", - "documentation": "

The destination path for the health check request. This value is only used if the specified protocol is HTTP or HTTP/2. For any other protocol, this value is ignored.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual service resides in.

", + "location":"uri", + "locationName":"meshName" }, - "port": { - "shape": "PortNumber", - "documentation": "

The destination port for the health check request. This port must match the port defined in the PortMapping for the listener.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - "protocol": { - "shape": "PortProtocol", - "documentation": "

The protocol for the health check request. If you specify grpc, then your service must conform to the GRPC Health Checking Protocol.

" - }, - "timeoutMillis": { - "shape": "HealthCheckTimeoutMillis", - "documentation": "

The amount of time to wait when receiving a response from the health check, in milliseconds.

" - }, - "unhealthyThreshold": { - "shape": "HealthCheckThreshold", - "documentation": "

The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.

" + "virtualServiceName":{ + "shape":"ServiceName", + "documentation":"

The name of the virtual service to describe.

", + "location":"uri", + "locationName":"virtualServiceName" } }, - "documentation": "

An object that represents the health check policy for a virtual node's listener.

" + "documentation":"" }, - "VirtualGatewayHealthCheckTimeoutMillis": { - "type": "long", - "box": true, - "min": 2000, - "max": 60000 - }, - "EgressFilter": { - "type": "structure", - "required": [ - "type" - ], - "members": { - "type": { - "shape": "EgressFilterType", - "documentation": "

The egress filter type. By default, the type is DROP_ALL, which allows egress only from virtual nodes to other defined resources in the service mesh (and any traffic to *.amazonaws.com for AWS API calls). You can set the egress filter type to ALLOW_ALL to allow egress to any endpoint inside or outside of the service mesh.

" + "DescribeVirtualServiceOutput":{ + "type":"structure", + "required":["virtualService"], + "members":{ + "virtualService":{ + "shape":"VirtualServiceData", + "documentation":"

The full description of your virtual service.

" } }, - "documentation": "

An object that represents the egress filter rules for a service mesh.

" + "documentation":"", + "payload":"virtualService" }, - "VirtualServiceList": { - "type": "list", - "member": { - "shape": "VirtualServiceRef" - } + "DnsServiceDiscovery":{ + "type":"structure", + "required":["hostname"], + "members":{ + "hostname":{ + "shape":"Hostname", + "documentation":"

Specifies the DNS service discovery hostname for the virtual node.

" + } + }, + "documentation":"

An object that represents the DNS service discovery information for your virtual node.

" }, - "ClientPolicy": { - "type": "structure", - "members": { - "tls": { - "shape": "ClientPolicyTls", - "documentation": "

A reference to an object that represents a Transport Layer Security (TLS) client policy.

" + "Duration":{ + "type":"structure", + "members":{ + "unit":{ + "shape":"DurationUnit", + "documentation":"

A unit of time.

" + }, + "value":{ + "shape":"DurationValue", + "documentation":"

A number of time units.

" } }, - "documentation": "

An object that represents a client policy.

" + "documentation":"

An object that represents a duration of time.

" }, - "VirtualGatewayHealthCheckIntervalMillis": { - "type": "long", - "box": true, - "min": 5000, - "max": 300000 + "DurationUnit":{ + "type":"string", + "enum":[ + "s", + "ms" + ] }, - "Boolean": { - "type": "boolean", - "box": true + "DurationValue":{ + "type":"long", + "box":true, + "min":0 }, - "VirtualGatewaySpec": { - "type": "structure", - "required": [ - "listeners" - ], - "members": { - "backendDefaults": { - "shape": "VirtualGatewayBackendDefaults", - "documentation": "

A reference to an object that represents the defaults for backends.

" - }, - "listeners": { - "shape": "VirtualGatewayListeners", - "documentation": "

The listeners that the mesh endpoint is expected to receive inbound traffic from. You can specify one listener.

" - }, - "logging": { - "shape": "VirtualGatewayLogging" + "EgressFilter":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"EgressFilterType", + "documentation":"

The egress filter type. By default, the type is DROP_ALL, which allows egress only from virtual nodes to other defined resources in the service mesh (and any traffic to *.amazonaws.com for AWS API calls). You can set the egress filter type to ALLOW_ALL to allow egress to any endpoint inside or outside of the service mesh.

" } }, - "documentation": "

An object that represents the specification of a service mesh resource.

" + "documentation":"

An object that represents the egress filter rules for a service mesh.

" }, - "HttpRetryPolicyEvent": { - "type": "string", - "min": 1, - "max": 25 + "EgressFilterType":{ + "type":"string", + "enum":[ + "ALLOW_ALL", + "DROP_ALL" + ] }, - "VirtualGatewayFileAccessLog": { - "type": "structure", - "required": [ - "path" - ], - "members": { - "path": { - "shape": "FilePath", - "documentation": "

The file path to write access logs to. You can use /dev/stdout to send access logs to standard out and configure your Envoy container to use a log driver, such as awslogs, to export the access logs to a log storage service such as Amazon CloudWatch Logs. You can also specify a path in the Envoy container's file system to write the files to disk.

" + "FileAccessLog":{ + "type":"structure", + "required":["path"], + "members":{ + "path":{ + "shape":"FilePath", + "documentation":"

The file path to write access logs to. You can use /dev/stdout to send access logs to standard out and configure your Envoy container to use a log driver, such as awslogs, to export the access logs to a log storage service such as Amazon CloudWatch Logs. You can also specify a path in the Envoy container's file system to write the files to disk.

The Envoy process must have write permissions to the path that you specify here. Otherwise, Envoy fails to bootstrap properly.

" } }, - "documentation": "

An object that represents an access log file.

" + "documentation":"

An object that represents an access log file.

" }, - "DescribeVirtualServiceOutput": { - "type": "structure", - "required": [ - "virtualService" - ], - "members": { - "virtualService": { - "shape": "VirtualServiceData", - "documentation": "

The full description of your virtual service.

" - } + "FilePath":{ + "type":"string", + "max":255, + "min":1 + }, + "ForbiddenException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You don't have permissions to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true }, - "documentation": "", - "payload": "virtualService" + "exception":true }, - "CreateGatewayRouteInput": { - "type": "structure", - "required": [ + "GatewayRouteData":{ + "type":"structure", + "required":[ "gatewayRouteName", "meshName", + "metadata", "spec", + "status", "virtualGatewayName" ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "gatewayRouteName": { - "shape": "ResourceName", - "documentation": "

The name to use for the gateway route.

" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to create the gateway route in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then the account that you specify must share the mesh with your account before you can create the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "GatewayRouteSpec", - "documentation": "

The gateway route specification to apply.

" - }, - "tags": { - "shape": "TagList", - "documentation": "

Optional metadata that you can apply to the gateway route to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway to associate the gateway route with. If the virtual gateway is in a shared mesh, then you must be the owner of the virtual gateway resource.

", - "location": "uri", - "locationName": "virtualGatewayName" - } - } - }, - "CertificateAuthorityArns": { - "type": "list", - "member": { - "shape": "Arn" - }, - "min": 1, - "max": 3 - }, - "DescribeVirtualNodeOutput": { - "type": "structure", - "required": [ - "virtualNode" - ], - "members": { - "virtualNode": { - "shape": "VirtualNodeData", - "documentation": "

The full description of your virtual node.

" + "members":{ + "gatewayRouteName":{ + "shape":"ResourceName", + "documentation":"

The name of the gateway route.

" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the resource resides in.

" + }, + "metadata":{"shape":"ResourceMetadata"}, + "spec":{ + "shape":"GatewayRouteSpec", + "documentation":"

The specifications of the gateway route.

" + }, + "status":{ + "shape":"GatewayRouteStatus", + "documentation":"

The status of the gateway route.

" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The virtual gateway that the gateway route is associated with.

" } }, - "documentation": "", - "payload": "virtualNode" + "documentation":"

An object that represents a gateway route returned by a describe operation.

" }, - "AwsCloudMapName": { - "type": "string", - "min": 1, - "max": 1024, - "pattern": "((?=^.{1,127}$)^([a-zA-Z0-9_][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9_]|[a-zA-Z0-9])(.([a-zA-Z0-9_][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9_]|[a-zA-Z0-9]))*$)|(^.$)" + "GatewayRouteList":{ + "type":"list", + "member":{"shape":"GatewayRouteRef"} }, - "VirtualGatewayData": { - "type": "structure", - "required": [ + "GatewayRouteRef":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "gatewayRouteName", + "lastUpdatedAt", "meshName", - "metadata", - "spec", - "status", + "meshOwner", + "resourceOwner", + "version", "virtualGatewayName" ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual gateway resides in.

" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) for the gateway route.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "gatewayRouteName":{ + "shape":"ResourceName", + "documentation":"

The name of the gateway route.

" }, - "metadata": { - "shape": "ResourceMetadata" + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, - "spec": { - "shape": "VirtualGatewaySpec", - "documentation": "

The specifications of the virtual gateway.

" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the resource resides in.

" }, - "status": { - "shape": "VirtualGatewayStatus", - "documentation": "

The current status of the virtual gateway.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway.

" + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The virtual gateway that the gateway route is associated with.

" } }, - "documentation": "

An object that represents a virtual gateway returned by a describe operation.

" + "documentation":"

An object that represents a gateway route returned by a list operation.

" }, - "CreateRouteOutput": { - "type": "structure", - "required": [ - "route" - ], - "members": { - "route": { - "shape": "RouteData", - "documentation": "

The full description of your route following the create call.

" + "GatewayRouteSpec":{ + "type":"structure", + "members":{ + "grpcRoute":{ + "shape":"GrpcGatewayRoute", + "documentation":"

An object that represents the specification of a gRPC gateway route.

" + }, + "http2Route":{ + "shape":"HttpGatewayRoute", + "documentation":"

An object that represents the specification of an HTTP/2 gateway route.

" + }, + "httpRoute":{ + "shape":"HttpGatewayRoute", + "documentation":"

An object that represents the specification of an HTTP gateway route.

" } }, - "documentation": "", - "payload": "route" + "documentation":"

An object that represents a gateway route specification. Specify one gateway route type.

" }, - "VirtualGatewayListener": { - "type": "structure", - "required": [ - "portMapping" - ], - "members": { - "healthCheck": { - "shape": "VirtualGatewayHealthCheckPolicy", - "documentation": "

The health check information for the listener.

" - }, - "portMapping": { - "shape": "VirtualGatewayPortMapping", - "documentation": "

The port mapping information for the listener.

" - }, - "tls": { - "shape": "VirtualGatewayListenerTls", - "documentation": "

A reference to an object that represents the Transport Layer Security (TLS) properties for the listener.

" + "GatewayRouteStatus":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"GatewayRouteStatusCode", + "documentation":"

The current status for the gateway route.

" } }, - "documentation": "

An object that represents a listener for a virtual gateway.

" + "documentation":"

An object that represents the current status of a gateway route.

" }, - "DnsServiceDiscovery": { - "type": "structure", - "required": [ - "hostname" - ], - "members": { - "hostname": { - "shape": "Hostname", - "documentation": "

Specifies the DNS service discovery hostname for the virtual node.

" + "GatewayRouteStatusCode":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "DELETED" + ] + }, + "GatewayRouteTarget":{ + "type":"structure", + "required":["virtualService"], + "members":{ + "virtualService":{ + "shape":"GatewayRouteVirtualService", + "documentation":"

An object that represents a virtual service gateway route target.

" } }, - "documentation": "

An object that represents the DNS service discovery information for your virtual node.

" + "documentation":"

An object that represents a gateway route target.

" }, - "VirtualGatewayPortMapping": { - "type": "structure", - "required": [ - "port", - "protocol" + "GatewayRouteVirtualService":{ + "type":"structure", + "required":["virtualServiceName"], + "members":{ + "virtualServiceName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual service that traffic is routed to.

" + } + }, + "documentation":"

An object that represents the virtual service that traffic is routed to.

" + }, + "GrpcGatewayRoute":{ + "type":"structure", + "required":[ + "action", + "match" ], - "members": { - "port": { - "shape": "PortNumber", - "documentation": "

The port used for the port mapping. Specify one protocol.

" + "members":{ + "action":{ + "shape":"GrpcGatewayRouteAction", + "documentation":"

An object that represents the action to take if a match is determined.

" }, - "protocol": { - "shape": "VirtualGatewayPortProtocol", - "documentation": "

The protocol used for the port mapping.

" + "match":{ + "shape":"GrpcGatewayRouteMatch", + "documentation":"

An object that represents the criteria for determining a request match.

" } }, - "documentation": "

An object that represents a port mapping.

" + "documentation":"

An object that represents a gRPC gateway route.

" }, - "DeleteVirtualGatewayOutput": { - "type": "structure", - "required": [ - "virtualGateway" - ], - "members": { - "virtualGateway": { - "shape": "VirtualGatewayData", - "documentation": "

The virtual gateway that was deleted.

" + "GrpcGatewayRouteAction":{ + "type":"structure", + "required":["target"], + "members":{ + "target":{ + "shape":"GatewayRouteTarget", + "documentation":"

An object that represents the target that traffic is routed to when a request matches the gateway route.

" } }, - "payload": "virtualGateway" + "documentation":"

An object that represents the action to take if a match is determined.

" }, - "DeleteRouteInput": { - "type": "structure", - "required": [ - "meshName", - "routeName", - "virtualRouterName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to delete the route in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "routeName": { - "shape": "ResourceName", - "documentation": "

The name of the route to delete.

", - "location": "uri", - "locationName": "routeName" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router to delete the route in.

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "VirtualNodeData": { - "type": "structure", - "required": [ - "meshName", - "metadata", - "spec", - "status", - "virtualNodeName" + "GrpcGatewayRouteMatch":{ + "type":"structure", + "members":{ + "serviceName":{ + "shape":"ServiceName", + "documentation":"

The fully qualified domain name for the service to match from the request.

" + } + }, + "documentation":"

An object that represents the criteria for determining a request match.

" + }, + "GrpcRetryPolicy":{ + "type":"structure", + "required":[ + "maxRetries", + "perRetryTimeout" ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual node resides in.

" + "members":{ + "grpcRetryEvents":{ + "shape":"GrpcRetryPolicyEvents", + "documentation":"

Specify at least one of the valid values.

" }, - "metadata": { - "shape": "ResourceMetadata", - "documentation": "

The associated metadata for the virtual node.

" + "httpRetryEvents":{ + "shape":"HttpRetryPolicyEvents", + "documentation":"

Specify at least one of the following values.

  • server-error – HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511

  • gateway-error – HTTP status codes 502, 503, and 504

  • client-error – HTTP status code 409

  • stream-error – Retry on refused stream

" }, - "spec": { - "shape": "VirtualNodeSpec", - "documentation": "

The specifications of the virtual node.

" + "maxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of retry attempts.

" }, - "status": { - "shape": "VirtualNodeStatus", - "documentation": "

The current status for the virtual node.

" + "perRetryTimeout":{ + "shape":"Duration", + "documentation":"

The timeout for each retry attempt.

" }, - "virtualNodeName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual node.

" + "tcpRetryEvents":{ + "shape":"TcpRetryPolicyEvents", + "documentation":"

Specify a valid value. The event occurs before any processing of a request has started and is encountered when the upstream is temporarily or permanently unavailable.

" } }, - "documentation": "

An object that represents a virtual node returned by a describe operation.

" - }, - "UntagResourceOutput": { - "type": "structure", - "members": { }, - "documentation": "" - }, - "ListGatewayRoutesLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 100 + "documentation":"

An object that represents a retry policy. Specify at least one value for at least one of the types of RetryEvents, a value for maxRetries, and a value for perRetryTimeout.

" }, - "TcpRetryPolicyEvent": { - "type": "string", - "enum": [ - "connection-error" + "GrpcRetryPolicyEvent":{ + "type":"string", + "enum":[ + "cancelled", + "deadline-exceeded", + "internal", + "resource-exhausted", + "unavailable" ] }, - "VirtualGatewayListenerTls": { - "type": "structure", - "required": [ - "certificate", - "mode" + "GrpcRetryPolicyEvents":{ + "type":"list", + "member":{"shape":"GrpcRetryPolicyEvent"}, + "max":5, + "min":1 + }, + "GrpcRoute":{ + "type":"structure", + "required":[ + "action", + "match" ], - "members": { - "certificate": { - "shape": "VirtualGatewayListenerTlsCertificate", - "documentation": "

An object that represents a Transport Layer Security (TLS) certificate.

" + "members":{ + "action":{ + "shape":"GrpcRouteAction", + "documentation":"

An object that represents the action to take if a match is determined.

" + }, + "match":{ + "shape":"GrpcRouteMatch", + "documentation":"

An object that represents the criteria for determining a request match.

" + }, + "retryPolicy":{ + "shape":"GrpcRetryPolicy", + "documentation":"

An object that represents a retry policy.

" }, - "mode": { - "shape": "VirtualGatewayListenerTlsMode", - "documentation": "

Specify one of the following modes.

  • STRICT – Listener only accepts connections with TLS enabled.

  • PERMISSIVE – Listener accepts connections with or without TLS enabled.

  • DISABLED – Listener only accepts connections without TLS.

" + "timeout":{ + "shape":"GrpcTimeout", + "documentation":"

An object that represents types of timeouts.

" } }, - "documentation": "

An object that represents the Transport Layer Security (TLS) properties for a listener.

" + "documentation":"

An object that represents a gRPC route type.

" }, - "Backend": { - "type": "structure", - "members": { - "virtualService": { - "shape": "VirtualServiceBackend", - "documentation": "

Specifies a virtual service to use as a backend for a virtual node.

" + "GrpcRouteAction":{ + "type":"structure", + "required":["weightedTargets"], + "members":{ + "weightedTargets":{ + "shape":"WeightedTargets", + "documentation":"

An object that represents the targets that traffic is routed to when a request matches the route.

" } }, - "documentation": "

An object that represents the backends that a virtual node is expected to send outbound traffic to.

" + "documentation":"

An object that represents the action to take if a match is determined.

" }, - "ListMeshesInput": { - "type": "structure", - "members": { - "limit": { - "shape": "ListMeshesLimit", - "documentation": "

The maximum number of results returned by ListMeshes in paginated output. When you use this parameter, ListMeshes returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListMeshes request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListMeshes returns up to 100 results and a nextToken value if applicable.

", - "location": "querystring", - "locationName": "limit" + "GrpcRouteMatch":{ + "type":"structure", + "members":{ + "metadata":{ + "shape":"GrpcRouteMetadataList", + "documentation":"

An object that represents the data to match from the request.

" }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated ListMeshes request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.

", - "location": "querystring", - "locationName": "nextToken" + "methodName":{ + "shape":"MethodName", + "documentation":"

The method name to match from the request. If you specify a name, you must also specify a serviceName.

" + }, + "serviceName":{ + "shape":"ServiceName", + "documentation":"

The fully qualified domain name for the service to match from the request.

" } }, - "documentation": "" + "documentation":"

An object that represents the criteria for determining a request match.

" }, - "VirtualGatewayListenerTlsFileCertificate": { - "type": "structure", - "required": [ - "certificateChain", - "privateKey" - ], - "members": { - "certificateChain": { - "shape": "FilePath", - "documentation": "

The certificate chain for the certificate.

" + "GrpcRouteMetadata":{ + "type":"structure", + "required":["name"], + "members":{ + "invert":{ + "shape":"Boolean", + "documentation":"

Specify True to match anything except the match criteria. The default value is False.

" }, - "privateKey": { - "shape": "FilePath", - "documentation": "

The private key for a certificate stored on the file system of the mesh endpoint that\n the proxy is running on.

" + "match":{ + "shape":"GrpcRouteMetadataMatchMethod", + "documentation":"

An object that represents the data to match from the request.

" + }, + "name":{ + "shape":"HeaderName", + "documentation":"

The name of the route.

" } }, - "documentation": "

An object that represents a local file certificate.\n The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" + "documentation":"

An object that represents the match metadata for the route.

" }, - "ListGatewayRoutesInput": { - "type": "structure", - "required": [ - "meshName", - "virtualGatewayName" - ], - "members": { - "limit": { - "shape": "ListGatewayRoutesLimit", - "documentation": "

The maximum number of results returned by ListGatewayRoutes in paginated\n output. When you use this parameter, ListGatewayRoutes returns only\n limit results in a single page along with a nextToken response\n element. You can see the remaining results of the initial request by sending another\n ListGatewayRoutes request with the returned nextToken value.\n This value can be between 1 and 100. If you don't use this\n parameter, ListGatewayRoutes returns up to 100 results and a\n nextToken value if applicable.

", - "location": "querystring", - "locationName": "limit" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to list gateway routes in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated\n ListGatewayRoutes request where limit was used and the results\n exceeded the value of that parameter. Pagination continues from the end of the previous\n results that returned the nextToken value.

", - "location": "querystring", - "locationName": "nextToken" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway to list gateway routes in.

", - "location": "uri", - "locationName": "virtualGatewayName" - } - } + "GrpcRouteMetadataList":{ + "type":"list", + "member":{"shape":"GrpcRouteMetadata"}, + "max":10, + "min":1 }, - "VirtualRouterData": { - "type": "structure", - "required": [ - "meshName", - "metadata", - "spec", - "status", - "virtualRouterName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual router resides in.

" + "GrpcRouteMetadataMatchMethod":{ + "type":"structure", + "members":{ + "exact":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must match the specified value exactly.

" }, - "metadata": { - "shape": "ResourceMetadata", - "documentation": "

The associated metadata for the virtual router.

" + "prefix":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must begin with the specified characters.

" }, - "spec": { - "shape": "VirtualRouterSpec", - "documentation": "

The specifications of the virtual router.

" + "range":{ + "shape":"MatchRange", + "documentation":"

An object that represents the range of values to match on.

" }, - "status": { - "shape": "VirtualRouterStatus", - "documentation": "

The current status of the virtual router.

" + "regex":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must include the specified characters.

" }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router.

" + "suffix":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must end with the specified characters.

" } }, - "documentation": "

An object that represents a virtual router returned by a describe operation.

" + "documentation":"

An object that represents the match method. Specify one of the match values.

", + "union":true }, - "UpdateMeshInput": { - "type": "structure", - "required": [ - "meshName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true + "GrpcTimeout":{ + "type":"structure", + "members":{ + "idle":{ + "shape":"Duration", + "documentation":"

An object that represents an idle timeout. An idle timeout bounds the amount of time that a connection may be idle. The default value is none.

" + }, + "perRequest":{ + "shape":"Duration", + "documentation":"

An object that represents a per request timeout. The default value is 15 seconds. If you set a higher timeout, then make sure that the higher value is set for each App Mesh resource in a conversation. For example, if a virtual node backend uses a virtual router provider to route to another virtual node, then the timeout should be greater than 15 seconds for the source and destination virtual node and the route.

" + } + }, + "documentation":"

An object that represents types of timeouts.
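A minimal sketch of these timeouts, assuming the generated GrpcTimeout and Duration builders and a DurationUnit enum with second/millisecond values; the durations shown are hypothetical.

```java
// Sketch only: assumes the generated GrpcTimeout/Duration builders and DurationUnit enum.
import software.amazon.awssdk.services.appmesh.model.Duration;
import software.amazon.awssdk.services.appmesh.model.DurationUnit;
import software.amazon.awssdk.services.appmesh.model.GrpcTimeout;

public class GrpcTimeoutSketch {
    public static GrpcTimeout sampleTimeout() {
        return GrpcTimeout.builder()
                // Raise the per-request timeout above the 15-second default; the same (or a larger)
                // value should then be used on every App Mesh resource in the conversation.
                .perRequest(Duration.builder().unit(DurationUnit.S).value(30L).build())
                // Bound how long a connection may sit idle (there is no default).
                .idle(Duration.builder().unit(DurationUnit.S).value(300L).build())
                .build();
    }
}
```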

" + }, + "HeaderMatch":{ + "type":"string", + "max":255, + "min":1 + }, + "HeaderMatchMethod":{ + "type":"structure", + "members":{ + "exact":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must match the specified value exactly.

" + }, + "prefix":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must begin with the specified characters.

" + }, + "range":{ + "shape":"MatchRange", + "documentation":"

An object that represents the range of values to match on.

" }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to update.

", - "location": "uri", - "locationName": "meshName" + "regex":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must include the specified characters.

" }, - "spec": { - "shape": "MeshSpec", - "documentation": "

The service mesh specification to apply.

" + "suffix":{ + "shape":"HeaderMatch", + "documentation":"

The value sent by the client must end with the specified characters.

" } }, - "documentation": "" + "documentation":"

An object that represents the method and value to match with the header value sent in a request. Specify one match method.
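Because this shape (like GrpcRouteMetadataMatchMethod above) is now tagged as a union, exactly one of exact, prefix, range, regex, or suffix should be populated per instance. A minimal sketch, assuming the generated HeaderMatchMethod and MatchRange builders; the values are hypothetical.

```java
// Sketch only: assumes the generated HeaderMatchMethod and MatchRange builders.
import software.amazon.awssdk.services.appmesh.model.HeaderMatchMethod;
import software.amazon.awssdk.services.appmesh.model.MatchRange;

public class HeaderMatchSketch {
    // Exactly one member of the union is populated per instance.
    static final HeaderMatchMethod PREFIX_MATCH =
            HeaderMatchMethod.builder().prefix("canary-").build();   // hypothetical value

    static final HeaderMatchMethod RANGE_MATCH =
            HeaderMatchMethod.builder()
                    .range(MatchRange.builder().start(200L).end(299L).build())
                    .build();
}
```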

", + "union":true }, - "VirtualGatewayHealthCheckPolicy": { - "type": "structure", - "required": [ + "HeaderName":{ + "type":"string", + "max":50, + "min":1 + }, + "HealthCheckIntervalMillis":{ + "type":"long", + "box":true, + "max":300000, + "min":5000 + }, + "HealthCheckPolicy":{ + "type":"structure", + "required":[ "healthyThreshold", "intervalMillis", "protocol", "timeoutMillis", "unhealthyThreshold" ], - "members": { - "healthyThreshold": { - "shape": "VirtualGatewayHealthCheckThreshold", - "documentation": "

The number of consecutive successful health checks that must occur before declaring the\n listener healthy.

" + "members":{ + "healthyThreshold":{ + "shape":"HealthCheckThreshold", + "documentation":"

The number of consecutive successful health checks that must occur before declaring the listener healthy.

" }, - "intervalMillis": { - "shape": "VirtualGatewayHealthCheckIntervalMillis", - "documentation": "

The time period in milliseconds between each health check execution.

" + "intervalMillis":{ + "shape":"HealthCheckIntervalMillis", + "documentation":"

The time period in milliseconds between each health check execution.

" }, - "path": { - "shape": "String", - "documentation": "

The destination path for the health check request. This value is only used if the\n specified protocol is HTTP or HTTP/2. For any other protocol, this value is ignored.

" + "path":{ + "shape":"String", + "documentation":"

The destination path for the health check request. This value is only used if the specified protocol is HTTP or HTTP/2. For any other protocol, this value is ignored.

" }, - "port": { - "shape": "PortNumber", - "documentation": "

The destination port for the health check request. This port must match the port defined\n in the PortMapping for the listener.

" + "port":{ + "shape":"PortNumber", + "documentation":"

The destination port for the health check request. This port must match the port defined in the PortMapping for the listener.

" }, - "protocol": { - "shape": "VirtualGatewayPortProtocol", - "documentation": "

The protocol for the health check request. If you specify grpc, then your\n service must conform to the GRPC Health\n Checking Protocol.

" + "protocol":{ + "shape":"PortProtocol", + "documentation":"

The protocol for the health check request. If you specify grpc, then your service must conform to the GRPC Health Checking Protocol.

" }, - "timeoutMillis": { - "shape": "VirtualGatewayHealthCheckTimeoutMillis", - "documentation": "

The amount of time to wait when receiving a response from the health check, in\n milliseconds.

" + "timeoutMillis":{ + "shape":"HealthCheckTimeoutMillis", + "documentation":"

The amount of time to wait when receiving a response from the health check, in milliseconds.

" }, - "unhealthyThreshold": { - "shape": "VirtualGatewayHealthCheckThreshold", - "documentation": "

The number of consecutive failed health checks that must occur before declaring a\n virtual gateway unhealthy.

" + "unhealthyThreshold":{ + "shape":"HealthCheckThreshold", + "documentation":"

The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.

" } }, - "documentation": "

An object that represents the health check policy for a virtual gateway's\n listener.

" + "documentation":"

An object that represents the health check policy for a virtual node's listener.
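A sketch of a policy that stays within the ranges defined by the HealthCheck* shapes above (interval 5,000–300,000 ms, timeout 2,000–60,000 ms, thresholds 2–10), assuming the generated HealthCheckPolicy builder and PortProtocol enum; the concrete values are hypothetical.

```java
// Sketch only: assumes the generated HealthCheckPolicy builder and PortProtocol enum.
import software.amazon.awssdk.services.appmesh.model.HealthCheckPolicy;
import software.amazon.awssdk.services.appmesh.model.PortProtocol;

public class HealthCheckSketch {
    public static HealthCheckPolicy sampleHealthCheck() {
        return HealthCheckPolicy.builder()
                .protocol(PortProtocol.HTTP)
                .path("/ping")                 // only used for HTTP and HTTP/2
                .port(8080)                    // must match the listener's PortMapping port
                .intervalMillis(30_000L)       // allowed range: 5,000-300,000 ms
                .timeoutMillis(5_000L)         // allowed range: 2,000-60,000 ms
                .healthyThreshold(3)           // allowed range: 2-10
                .unhealthyThreshold(3)
                .build();
    }
}
```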

" }, - "CreateVirtualRouterInput": { - "type": "structure", - "required": [ - "meshName", - "spec", - "virtualRouterName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to create the virtual router in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualRouterSpec", - "documentation": "

The virtual router specification to apply.

" - }, - "tags": { - "shape": "TagList", - "documentation": "

Optional metadata that you can apply to the virtual router to assist with categorization\n and organization. Each tag consists of a key and an optional value, both of which you\n define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

", - "tags": [ - "not-preview" - ] - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name to use for the virtual router.

" - } - }, - "documentation": "" - }, - "DescribeVirtualRouterOutput": { - "type": "structure", - "required": [ - "virtualRouter" - ], - "members": { - "virtualRouter": { - "shape": "VirtualRouterData", - "documentation": "

The full description of your virtual router.

" - } - }, - "documentation": "", - "payload": "virtualRouter" + "HealthCheckThreshold":{ + "type":"integer", + "max":10, + "min":2 + }, + "HealthCheckTimeoutMillis":{ + "type":"long", + "box":true, + "max":60000, + "min":2000 }, - "CreateMeshOutput": { - "type": "structure", - "required": [ - "mesh" + "Hostname":{"type":"string"}, + "HttpGatewayRoute":{ + "type":"structure", + "required":[ + "action", + "match" ], - "members": { - "mesh": { - "shape": "MeshData", - "documentation": "

The full description of your service mesh following the create call.

" + "members":{ + "action":{ + "shape":"HttpGatewayRouteAction", + "documentation":"

An object that represents the action to take if a match is determined.

" + }, + "match":{ + "shape":"HttpGatewayRouteMatch", + "documentation":"

An object that represents the criteria for determining a request match.

" } }, - "documentation": "", - "payload": "mesh" + "documentation":"

An object that represents an HTTP gateway route.

" }, - "CreateVirtualRouterOutput": { - "type": "structure", - "required": [ - "virtualRouter" - ], - "members": { - "virtualRouter": { - "shape": "VirtualRouterData", - "documentation": "

The full description of your virtual router following the create call.

" + "HttpGatewayRouteAction":{ + "type":"structure", + "required":["target"], + "members":{ + "target":{ + "shape":"GatewayRouteTarget", + "documentation":"

An object that represents the target that traffic is routed to when a request matches the gateway route.

" } }, - "documentation": "", - "payload": "virtualRouter" + "documentation":"

An object that represents the action to take if a match is determined.

" }, - "VirtualServiceStatus": { - "type": "structure", - "required": [ - "status" - ], - "members": { - "status": { - "shape": "VirtualServiceStatusCode", - "documentation": "

The current status of the virtual service.

" + "HttpGatewayRouteMatch":{ + "type":"structure", + "required":["prefix"], + "members":{ + "prefix":{ + "shape":"String", + "documentation":"

Specifies the path to match requests with. This parameter must always start with /, which by itself matches all requests to the virtual service name. You can also match for path-based routing of requests. For example, if your virtual service name is my-service.local and you want the route to match requests to my-service.local/metrics, your prefix should be /metrics.

" } }, - "documentation": "

An object that represents the status of a virtual service.

" + "documentation":"

An object that represents the criteria for determining a request match.

" }, - "HttpRetryPolicyEvents": { - "type": "list", - "member": { - "shape": "HttpRetryPolicyEvent" - }, - "min": 1, - "max": 25 + "HttpMethod":{ + "type":"string", + "enum":[ + "GET", + "HEAD", + "POST", + "PUT", + "DELETE", + "CONNECT", + "OPTIONS", + "TRACE", + "PATCH" + ] }, - "VirtualGatewayListenerTlsCertificate": { - "type": "structure", - "members": { - "acm": { - "shape": "VirtualGatewayListenerTlsAcmCertificate", - "documentation": "

A reference to an object that represents an AWS Certificate Manager (ACM) certificate.

" + "HttpRetryPolicy":{ + "type":"structure", + "required":[ + "maxRetries", + "perRetryTimeout" + ], + "members":{ + "httpRetryEvents":{ + "shape":"HttpRetryPolicyEvents", + "documentation":"

Specify at least one of the following values.

  • server-error – HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511

  • gateway-error – HTTP status codes 502, 503, and 504

  • client-error – HTTP status code 409

  • stream-error – Retry on refused stream

" }, - "file": { - "shape": "VirtualGatewayListenerTlsFileCertificate", - "documentation": "

A reference to an object that represents a local file certificate.

" - } - }, - "documentation": "

An object that represents a listener's Transport Layer Security (TLS) certificate.

" - }, - "ListenerTlsCertificate": { - "type": "structure", - "members": { - "acm": { - "shape": "ListenerTlsAcmCertificate", - "documentation": "

A reference to an object that represents an AWS Certificate Manager (ACM) certificate.

" + "maxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of retry attempts.

" + }, + "perRetryTimeout":{ + "shape":"Duration", + "documentation":"

The timeout for each retry attempt.

" }, - "file": { - "shape": "ListenerTlsFileCertificate", - "documentation": "

A reference to an object that represents a local file certificate.

" + "tcpRetryEvents":{ + "shape":"TcpRetryPolicyEvents", + "documentation":"

Specify a valid value. The event occurs before any processing of a request has started and is encountered when the upstream is temporarily or permanently unavailable.

" } }, - "documentation": "

An object that represents a listener's Transport Layer Security (TLS) certificate.

" + "documentation":"

An object that represents a retry policy. Specify at least one value for at least one of the types of RetryEvents, a value for maxRetries, and a value for perRetryTimeout.
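A sketch of such a policy, assuming the generated HttpRetryPolicy builder; note that HttpRetryPolicyEvent is a length-constrained string in this model (so the HTTP events are passed as plain strings), while the TCP events use the TcpRetryPolicyEvent enum. The retry count and timeout values are hypothetical.

```java
// Sketch only: assumes the generated HttpRetryPolicy builder; httpRetryEvents are plain
// strings in this model, while tcpRetryEvents use the TcpRetryPolicyEvent enum.
import software.amazon.awssdk.services.appmesh.model.Duration;
import software.amazon.awssdk.services.appmesh.model.DurationUnit;
import software.amazon.awssdk.services.appmesh.model.HttpRetryPolicy;
import software.amazon.awssdk.services.appmesh.model.TcpRetryPolicyEvent;

public class HttpRetryPolicySketch {
    public static HttpRetryPolicy sampleRetryPolicy() {
        return HttpRetryPolicy.builder()
                .maxRetries(2L)
                .perRetryTimeout(Duration.builder().unit(DurationUnit.MS).value(500L).build())
                .httpRetryEvents("server-error", "gateway-error")
                .tcpRetryEvents(TcpRetryPolicyEvent.CONNECTION_ERROR)
                .build();
    }
}
```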

" }, - "ListMeshesLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 100 + "HttpRetryPolicyEvent":{ + "type":"string", + "max":25, + "min":1 }, - "AwsCloudMapInstanceAttributeKey": { - "type": "string", - "min": 1, - "max": 255, - "pattern": "^[a-zA-Z0-9!-~]+$" + "HttpRetryPolicyEvents":{ + "type":"list", + "member":{"shape":"HttpRetryPolicyEvent"}, + "max":25, + "min":1 }, - "VirtualRouterSpec": { - "type": "structure", - "members": { - "listeners": { - "shape": "VirtualRouterListeners", - "documentation": "

The listeners that the virtual router is expected to receive inbound traffic from. You\n can specify one listener.

" - } - }, - "documentation": "

An object that represents the specification of a virtual router.

" - }, - "GatewayRouteVirtualService": { - "type": "structure", - "required": [ - "virtualServiceName" + "HttpRoute":{ + "type":"structure", + "required":[ + "action", + "match" ], - "members": { - "virtualServiceName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual service that traffic is routed to.

" - } - }, - "documentation": "

An object that represents the virtual service that traffic is routed to.

" - }, - "VirtualNodeSpec": { - "type": "structure", - "members": { - "backendDefaults": { - "shape": "BackendDefaults", - "documentation": "

A reference to an object that represents the defaults for backends.

" - }, - "backends": { - "shape": "Backends", - "documentation": "

The backends that the virtual node is expected to send outbound traffic to.

" + "members":{ + "action":{ + "shape":"HttpRouteAction", + "documentation":"

An object that represents the action to take if a match is determined.

" }, - "listeners": { - "shape": "Listeners", - "documentation": "

The listener that the virtual node is expected to receive inbound traffic from. You can\n specify one listener.

" + "match":{ + "shape":"HttpRouteMatch", + "documentation":"

An object that represents the criteria for determining a request match.

" }, - "logging": { - "shape": "Logging", - "documentation": "

The inbound and outbound access logging information for the virtual node.

" + "retryPolicy":{ + "shape":"HttpRetryPolicy", + "documentation":"

An object that represents a retry policy.

" }, - "serviceDiscovery": { - "shape": "ServiceDiscovery", - "documentation": "

The service discovery information for the virtual node. If your virtual node does not\n expect ingress traffic, you can omit this parameter. If you specify a\n listener, then you must specify service discovery information.

" + "timeout":{ + "shape":"HttpTimeout", + "documentation":"

An object that represents types of timeouts.

" } }, - "documentation": "

An object that represents the specification of a virtual node.

" + "documentation":"

An object that represents an HTTP or HTTP/2 route type.

" }, - "ListMeshesOutput": { - "type": "structure", - "required": [ - "meshes" - ], - "members": { - "meshes": { - "shape": "MeshList", - "documentation": "

The list of existing service meshes.

" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListMeshes request.\n When the results of a ListMeshes request exceed limit, you can\n use this value to retrieve the next page of results. This value is null when\n there are no more results to return.

" + "HttpRouteAction":{ + "type":"structure", + "required":["weightedTargets"], + "members":{ + "weightedTargets":{ + "shape":"WeightedTargets", + "documentation":"

An object that represents the targets that traffic is routed to when a request matches the route.

" } }, - "documentation": "" + "documentation":"

An object that represents the action to take if a match is determined.

" }, - "VirtualRouterListeners": { - "type": "list", - "member": { - "shape": "VirtualRouterListener" - }, - "min": 1, - "max": 1 - }, - "GatewayRouteSpec": { - "type": "structure", - "members": { - "grpcRoute": { - "shape": "GrpcGatewayRoute", - "documentation": "

An object that represents the specification of a gRPC gateway route.

" + "HttpRouteHeader":{ + "type":"structure", + "required":["name"], + "members":{ + "invert":{ + "shape":"Boolean", + "documentation":"

Specify True to match anything except the match criteria. The default value is False.

" }, - "http2Route": { - "shape": "HttpGatewayRoute", - "documentation": "

An object that represents the specification of an HTTP/2 gateway route.

" + "match":{ + "shape":"HeaderMatchMethod", + "documentation":"

The HeaderMatchMethod object.

" }, - "httpRoute": { - "shape": "HttpGatewayRoute", - "documentation": "

An object that represents the specification of an HTTP gateway route.

" + "name":{ + "shape":"HeaderName", + "documentation":"

A name for the HTTP header in the client request that will be matched on.

" } }, - "documentation": "

An object that represents a gateway route specification. Specify one gateway route\n type.

" - }, - "PortSet": { - "type": "list", - "member": { - "shape": "PortNumber" - } + "documentation":"

An object that represents the HTTP header in the request.

" }, - "HttpMethod": { - "type": "string", - "enum": [ - "CONNECT", - "DELETE", - "GET", - "HEAD", - "OPTIONS", - "PATCH", - "POST", - "PUT", - "TRACE" - ] + "HttpRouteHeaders":{ + "type":"list", + "member":{"shape":"HttpRouteHeader"}, + "max":10, + "min":1 }, - "ConflictException": { - "type": "structure", - "members": { - "message": { - "shape": "String" + "HttpRouteMatch":{ + "type":"structure", + "required":["prefix"], + "members":{ + "headers":{ + "shape":"HttpRouteHeaders", + "documentation":"

An object that represents the client request headers to match on.

" + }, + "method":{ + "shape":"HttpMethod", + "documentation":"

The client request method to match on. Specify only one.

" + }, + "prefix":{ + "shape":"String", + "documentation":"

Specifies the path to match requests with. This parameter must always start with /, which by itself matches all requests to the virtual service name. You can also match for path-based routing of requests. For example, if your virtual service name is my-service.local and you want the route to match requests to my-service.local/metrics, your prefix should be /metrics.

" + }, + "scheme":{ + "shape":"HttpScheme", + "documentation":"

The client request scheme to match on. Specify only one.

" } }, - "documentation": "

The request contains a client token that was used for a previous update resource call\n with different specifications. Try the request again with a new client token.

", - "exception": true, - "error": { - "code": "ConflictException", - "httpStatusCode": 409, - "senderFault": true - } + "documentation":"

An object that represents the requirements for a route to match HTTP requests for a virtual router.
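A sketch that mirrors the /metrics example in the prefix description above, assuming the generated HttpRouteMatch and HttpRouteHeader builders; the header name and value are hypothetical.

```java
// Sketch only: assumes the generated HttpRouteMatch/HttpRouteHeader builders and enums.
import software.amazon.awssdk.services.appmesh.model.HeaderMatchMethod;
import software.amazon.awssdk.services.appmesh.model.HttpMethod;
import software.amazon.awssdk.services.appmesh.model.HttpRouteHeader;
import software.amazon.awssdk.services.appmesh.model.HttpRouteMatch;
import software.amazon.awssdk.services.appmesh.model.HttpScheme;

public class HttpRouteMatchSketch {
    public static HttpRouteMatch sampleMatch() {
        return HttpRouteMatch.builder()
                .prefix("/metrics")            // matches my-service.local/metrics, per the prefix docs
                .method(HttpMethod.GET)
                .scheme(HttpScheme.HTTP)
                .headers(HttpRouteHeader.builder()
                        .name("x-env")         // hypothetical header name
                        .match(HeaderMatchMethod.builder().exact("staging").build())
                        .build())
                .build();
    }
}
```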

" }, - "VirtualGatewayBackendDefaults": { - "type": "structure", - "members": { - "clientPolicy": { - "shape": "VirtualGatewayClientPolicy", - "documentation": "

A reference to an object that represents a client policy.

" - } - }, - "documentation": "

An object that represents the default properties for a backend.

" + "HttpScheme":{ + "type":"string", + "enum":[ + "http", + "https" + ] }, - "ListenerTimeout": { - "type": "structure", - "members": { - "grpc": { - "shape": "GrpcTimeout" - }, - "http": { - "shape": "HttpTimeout", - "documentation": "

An object that represents types of timeouts.

" + "HttpTimeout":{ + "type":"structure", + "members":{ + "idle":{ + "shape":"Duration", + "documentation":"

An object that represents an idle timeout. An idle timeout bounds the amount of time that a connection may be idle. The default value is none.

" }, - "http2": { - "shape": "HttpTimeout", - "documentation": "

An object that represents types of timeouts.

" - }, - "tcp": { - "shape": "TcpTimeout", - "documentation": "

An object that represents types of timeouts.

" + "perRequest":{ + "shape":"Duration", + "documentation":"

An object that represents a per request timeout. The default value is 15 seconds. If you set a higher timeout, then make sure that the higher value is set for each App Mesh resource in a conversation. For example, if a virtual node backend uses a virtual router provider to route to another virtual node, then the timeout should be greater than 15 seconds for the source and destination virtual node and the route.

" } }, - "documentation": "

An object that represents timeouts for different protocols.

" + "documentation":"

An object that represents types of timeouts.

" }, - "MeshList": { - "type": "list", - "member": { - "shape": "MeshRef" - } + "InternalServerErrorException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request processing has failed because of an unknown error, exception, or failure.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} }, - "MaxRetries": { - "type": "long", - "box": true, - "min": 0 + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You have exceeded a service limit for your account. For more information, see Service Limits in the AWS App Mesh User Guide.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true }, - "DescribeGatewayRouteInput": { - "type": "structure", - "required": [ - "gatewayRouteName", + "ListGatewayRoutesInput":{ + "type":"structure", + "required":[ "meshName", "virtualGatewayName" ], - "members": { - "gatewayRouteName": { - "shape": "ResourceName", - "documentation": "

The name of the gateway route to describe.

", - "location": "uri", - "locationName": "gatewayRouteName" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the gateway route resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway that the gateway route is associated with.

", - "location": "uri", - "locationName": "virtualGatewayName" + "members":{ + "limit":{ + "shape":"ListGatewayRoutesLimit", + "documentation":"

The maximum number of results returned by ListGatewayRoutes in paginated output. When you use this parameter, ListGatewayRoutes returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListGatewayRoutes request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListGatewayRoutes returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to list gateway routes in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListGatewayRoutes request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway to list gateway routes in.

", + "location":"uri", + "locationName":"virtualGatewayName" } } }, - "TlsValidationContextTrust": { - "type": "structure", - "members": { - "acm": { - "shape": "TlsValidationContextAcmTrust", - "documentation": "

A reference to an object that represents a TLS validation context trust for an AWS Certificate Manager (ACM)\n certificate.

" + "ListGatewayRoutesLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListGatewayRoutesOutput":{ + "type":"structure", + "required":["gatewayRoutes"], + "members":{ + "gatewayRoutes":{ + "shape":"GatewayRouteList", + "documentation":"

The list of existing gateway routes for the specified service mesh and virtual gateway.

" }, - "file": { - "shape": "TlsValidationContextFileTrust", - "documentation": "

An object that represents a TLS validation context trust for a local file.

" + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListGatewayRoutes request. When the results of a ListGatewayRoutes request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" } - }, - "documentation": "

An object that represents a Transport Layer Security (TLS) validation context trust.

" + } }, - "PortMapping": { - "type": "structure", - "required": [ - "port", - "protocol" - ], - "members": { - "port": { - "shape": "PortNumber", - "documentation": "

The port used for the port mapping.

" + "ListMeshesInput":{ + "type":"structure", + "members":{ + "limit":{ + "shape":"ListMeshesLimit", + "documentation":"

The maximum number of results returned by ListMeshes in paginated output. When you use this parameter, ListMeshes returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListMeshes request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListMeshes returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" }, - "protocol": { - "shape": "PortProtocol", - "documentation": "

The protocol used for the port mapping. Specify one protocol.

" + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListMeshes request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.
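A sketch of the limit/nextToken contract described here, using a plain loop against AppMeshClient.listMeshes; the generated listMeshesPaginator convenience method could be used instead, and the limit of 50 is arbitrary.

```java
// Sketch only: a manual nextToken loop over the ListMeshes pagination contract above.
import software.amazon.awssdk.services.appmesh.AppMeshClient;
import software.amazon.awssdk.services.appmesh.model.ListMeshesRequest;
import software.amazon.awssdk.services.appmesh.model.ListMeshesResponse;
import software.amazon.awssdk.services.appmesh.model.MeshRef;

public class ListAllMeshes {
    public static void main(String[] args) {
        try (AppMeshClient appMesh = AppMeshClient.create()) {
            String nextToken = null;
            do {
                ListMeshesResponse page = appMesh.listMeshes(ListMeshesRequest.builder()
                        .limit(50)                 // 1-100; defaults to 100 when omitted
                        .nextToken(nextToken)      // null on the first request
                        .build());
                for (MeshRef mesh : page.meshes()) {
                    System.out.println(mesh.meshName());
                }
                nextToken = page.nextToken();      // null when there are no more results
            } while (nextToken != null);
        }
    }
}
```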

", + "location":"querystring", + "locationName":"nextToken" } }, - "documentation": "

An object that represents a port mapping.

" + "documentation":"" }, - "VirtualGatewayHealthCheckThreshold": { - "type": "integer", - "min": 2, - "max": 10 + "ListMeshesLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 }, - "ListVirtualServicesOutput": { - "type": "structure", - "required": [ - "virtualServices" - ], - "members": { - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListVirtualServices\n request. When the results of a ListVirtualServices request exceed\n limit, you can use this value to retrieve the next page of results. This\n value is null when there are no more results to return.

" + "ListMeshesOutput":{ + "type":"structure", + "required":["meshes"], + "members":{ + "meshes":{ + "shape":"MeshList", + "documentation":"

The list of existing service meshes.

" }, - "virtualServices": { - "shape": "VirtualServiceList", - "documentation": "

The list of existing virtual services for the specified service mesh.

" + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListMeshes request. When the results of a ListMeshes request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" } }, - "documentation": "" - }, - "AwsCloudMapInstanceAttributeValue": { - "type": "string", - "min": 1, - "max": 1024, - "pattern": "^([a-zA-Z0-9!-~][ ta-zA-Z0-9!-~]*){0,1}[a-zA-Z0-9!-~]{0,1}$" + "documentation":"" }, - "WeightedTarget": { - "type": "structure", - "required": [ - "virtualNode", - "weight" + "ListRoutesInput":{ + "type":"structure", + "required":[ + "meshName", + "virtualRouterName" ], - "members": { - "virtualNode": { - "shape": "ResourceName", - "documentation": "

The virtual node to associate with the weighted target.

" - }, - "weight": { - "shape": "PercentInt", - "documentation": "

The relative weight of the weighted target.

" + "members":{ + "limit":{ + "shape":"ListRoutesLimit", + "documentation":"

The maximum number of results returned by ListRoutes in paginated output. When you use this parameter, ListRoutes returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListRoutes request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListRoutes returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to list routes in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListRoutes request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" + }, + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router to list routes in.

", + "location":"uri", + "locationName":"virtualRouterName" + } + }, + "documentation":"" + }, + "ListRoutesLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListRoutesOutput":{ + "type":"structure", + "required":["routes"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListRoutes request. When the results of a ListRoutes request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" + }, + "routes":{ + "shape":"RouteList", + "documentation":"

The list of existing routes for the specified service mesh and virtual router.

" + } + }, + "documentation":"" + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "limit":{ + "shape":"TagsLimit", + "documentation":"

The maximum number of tag results returned by ListTagsForResource in paginated output. When this parameter is used, ListTagsForResource returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListTagsForResource request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListTagsForResource returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListTagsForResource request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" + }, + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource to list the tags for.

", + "location":"querystring", + "locationName":"resourceArn" + } + }, + "documentation":"" + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "required":["tags"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListTagsForResource request. When the results of a ListTagsForResource request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tags for the resource.

" + } + }, + "documentation":"" + }, + "ListVirtualGatewaysInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "limit":{ + "shape":"ListVirtualGatewaysLimit", + "documentation":"

The maximum number of results returned by ListVirtualGateways in paginated output. When you use this parameter, ListVirtualGateways returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListVirtualGateways request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListVirtualGateways returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to list virtual gateways in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListVirtualGateways request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" } - }, - "documentation": "

An object that represents a target and its relative weight. Traffic is distributed\n across targets according to their relative weight. For example, a weighted target with a\n relative weight of 50 receives five times as much traffic as one with a relative weight of\n 10. The total weight for all targets combined must be less than or equal to 100.

" + } }, - "GrpcGatewayRoute": { - "type": "structure", - "required": [ - "action", - "match" - ], - "members": { - "action": { - "shape": "GrpcGatewayRouteAction", - "documentation": "

An object that represents the action to take if a match is determined.

" + "ListVirtualGatewaysLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListVirtualGatewaysOutput":{ + "type":"structure", + "required":["virtualGateways"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListVirtualGateways request. When the results of a ListVirtualGateways request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" }, - "match": { - "shape": "GrpcGatewayRouteMatch", - "documentation": "

An object that represents the criteria for determining a request match.

" + "virtualGateways":{ + "shape":"VirtualGatewayList", + "documentation":"

The list of existing virtual gateways for the specified service mesh.

" } - }, - "documentation": "

An object that represents a gRPC gateway route.

" + } }, - "GatewayRouteData": { - "type": "structure", - "required": [ - "gatewayRouteName", - "meshName", - "metadata", - "spec", - "status", - "virtualGatewayName" - ], - "members": { - "gatewayRouteName": { - "shape": "ResourceName", - "documentation": "

The name of the gateway route.

" + "ListVirtualNodesInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "limit":{ + "shape":"ListVirtualNodesLimit", + "documentation":"

The maximum number of results returned by ListVirtualNodes in paginated output. When you use this parameter, ListVirtualNodes returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListVirtualNodes request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListVirtualNodes returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to list virtual nodes in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListVirtualNodes request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"" + }, + "ListVirtualNodesLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListVirtualNodesOutput":{ + "type":"structure", + "required":["virtualNodes"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListVirtualNodes request. When the results of a ListVirtualNodes request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" + }, + "virtualNodes":{ + "shape":"VirtualNodeList", + "documentation":"

The list of existing virtual nodes for the specified service mesh.

" + } + }, + "documentation":"" + }, + "ListVirtualRoutersInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "limit":{ + "shape":"ListVirtualRoutersLimit", + "documentation":"

The maximum number of results returned by ListVirtualRouters in paginated output. When you use this parameter, ListVirtualRouters returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListVirtualRouters request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListVirtualRouters returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to list virtual routers in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListVirtualRouters request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"" + }, + "ListVirtualRoutersLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListVirtualRoutersOutput":{ + "type":"structure", + "required":["virtualRouters"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListVirtualRouters request. When the results of a ListVirtualRouters request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" + }, + "virtualRouters":{ + "shape":"VirtualRouterList", + "documentation":"

The list of existing virtual routers for the specified service mesh.

" + } + }, + "documentation":"" + }, + "ListVirtualServicesInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "limit":{ + "shape":"ListVirtualServicesLimit", + "documentation":"

The maximum number of results returned by ListVirtualServices in paginated output. When you use this parameter, ListVirtualServices returns only limit results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListVirtualServices request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListVirtualServices returns up to 100 results and a nextToken value if applicable.

", + "location":"querystring", + "locationName":"limit" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to list virtual services in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListVirtualServices request where limit was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"" + }, + "ListVirtualServicesLimit":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListVirtualServicesOutput":{ + "type":"structure", + "required":["virtualServices"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value to include in a future ListVirtualServices request. When the results of a ListVirtualServices request exceed limit, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

" + }, + "virtualServices":{ + "shape":"VirtualServiceList", + "documentation":"

The list of existing virtual services for the specified service mesh.

" + } + }, + "documentation":"" + }, + "Listener":{ + "type":"structure", + "required":["portMapping"], + "members":{ + "connectionPool":{ + "shape":"VirtualNodeConnectionPool", + "documentation":"

The connection pool information for the listener.

" + }, + "healthCheck":{ + "shape":"HealthCheckPolicy", + "documentation":"

The health check information for the listener.

" + }, + "outlierDetection":{ + "shape":"OutlierDetection", + "documentation":"

The outlier detection information for the listener.

" + }, + "portMapping":{ + "shape":"PortMapping", + "documentation":"

The port mapping information for the listener.

" + }, + "timeout":{ + "shape":"ListenerTimeout", + "documentation":"

An object that represents timeouts for different protocols.

" }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the resource resides in.

" - }, - "metadata": { - "shape": "ResourceMetadata" - }, - "spec": { - "shape": "GatewayRouteSpec", - "documentation": "

The specifications of the gateway route.

" - }, - "status": { - "shape": "GatewayRouteStatus", - "documentation": "

The status of the gateway route.

" + "tls":{ + "shape":"ListenerTls", + "documentation":"

A reference to an object that represents the Transport Layer Security (TLS) properties for a listener.

" + } + }, + "documentation":"

An object that represents a listener for a virtual node.

" + }, + "ListenerTimeout":{ + "type":"structure", + "members":{ + "grpc":{"shape":"GrpcTimeout"}, + "http":{ + "shape":"HttpTimeout", + "documentation":"

An object that represents types of timeouts.

" + }, + "http2":{ + "shape":"HttpTimeout", + "documentation":"

An object that represents types of timeouts.

" + }, + "tcp":{ + "shape":"TcpTimeout", + "documentation":"

An object that represents types of timeouts.

" + } + }, + "documentation":"

An object that represents timeouts for different protocols.

", + "union":true + }, + "ListenerTls":{ + "type":"structure", + "required":[ + "certificate", + "mode" + ], + "members":{ + "certificate":{ + "shape":"ListenerTlsCertificate", + "documentation":"

A reference to an object that represents a listener's TLS certificate.

" }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The virtual gateway that the gateway route is associated with.

" + "mode":{ + "shape":"ListenerTlsMode", + "documentation":"

Specify one of the following modes.

  • STRICT – Listener only accepts connections with TLS enabled.

  • PERMISSIVE – Listener accepts connections with or without TLS enabled.

  • DISABLED – Listener only accepts connections without TLS.

" } }, - "documentation": "

An object that represents a gateway route returned by a describe operation.

" + "documentation":"

An object that represents the Transport Layer Security (TLS) properties for a listener.
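A sketch that wires these TLS properties into a virtual node listener, assuming the generated Listener, PortMapping, ListenerTls, and ListenerTlsFileCertificate builders; the port and certificate paths are hypothetical, and because ListenerTlsCertificate is a union, either acm or file is set but not both.

```java
// Sketch only: assumes the generated Listener/ListenerTls builders and enums from this model.
import software.amazon.awssdk.services.appmesh.model.Listener;
import software.amazon.awssdk.services.appmesh.model.ListenerTls;
import software.amazon.awssdk.services.appmesh.model.ListenerTlsCertificate;
import software.amazon.awssdk.services.appmesh.model.ListenerTlsFileCertificate;
import software.amazon.awssdk.services.appmesh.model.ListenerTlsMode;
import software.amazon.awssdk.services.appmesh.model.PortMapping;
import software.amazon.awssdk.services.appmesh.model.PortProtocol;

public class TlsListenerSketch {
    public static Listener sampleListener() {
        return Listener.builder()
                .portMapping(PortMapping.builder()
                        .port(8443)
                        .protocol(PortProtocol.HTTP2)
                        .build())
                .tls(ListenerTls.builder()
                        .mode(ListenerTlsMode.STRICT)   // only TLS connections are accepted
                        .certificate(ListenerTlsCertificate.builder()
                                // Union shape: set either acm or file, not both.
                                .file(ListenerTlsFileCertificate.builder()
                                        .certificateChain("/certs/chain.pem")   // hypothetical path
                                        .privateKey("/certs/key.pem")           // hypothetical path
                                        .build())
                                .build())
                        .build())
                .build();
    }
}
```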

" }, - "RouteRef": { - "type": "structure", - "required": [ - "arn", - "createdAt", - "lastUpdatedAt", - "meshName", - "meshOwner", - "resourceOwner", - "routeName", - "version", - "virtualRouterName" - ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) for the route.

" - }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" - }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the route resides in.

" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" - }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" - }, - "routeName": { - "shape": "ResourceName", - "documentation": "

The name of the route.

" - }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The virtual router that the route is associated with.

" + "ListenerTlsAcmCertificate":{ + "type":"structure", + "required":["certificateArn"], + "members":{ + "certificateArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" } }, - "documentation": "

An object that represents a route returned by a list operation.

" + "documentation":"

An object that represents an AWS Certificate Manager (ACM) certificate.

" }, - "DeleteVirtualNodeInput": { - "type": "structure", - "required": [ - "meshName", - "virtualNodeName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to delete the virtual node in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualNodeName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual node to delete.

", - "location": "uri", - "locationName": "virtualNodeName" - } - }, - "documentation": "" - }, - "RouteData": { - "type": "structure", - "required": [ - "meshName", - "metadata", - "routeName", - "spec", - "status", - "virtualRouterName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the route resides in.

" - }, - "metadata": { - "shape": "ResourceMetadata", - "documentation": "

The associated metadata for the route.

" - }, - "routeName": { - "shape": "ResourceName", - "documentation": "

The name of the route.

" + "ListenerTlsCertificate":{ + "type":"structure", + "members":{ + "acm":{ + "shape":"ListenerTlsAcmCertificate", + "documentation":"

A reference to an object that represents an AWS Certificate Manager (ACM) certificate.

" }, - "spec": { - "shape": "RouteSpec", - "documentation": "

The specifications of the route.

" - }, - "status": { - "shape": "RouteStatus", - "documentation": "

The status of the route.

" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The virtual router that the route is associated with.

" + "file":{ + "shape":"ListenerTlsFileCertificate", + "documentation":"

A reference to an object that represents a local file certificate.

" } }, - "documentation": "

An object that represents a route returned by a describe operation.

" - }, - "RouteStatusCode": { - "type": "string", - "enum": [ - "ACTIVE", - "DELETED", - "INACTIVE" - ] + "documentation":"

An object that represents a listener's Transport Layer Security (TLS) certificate.

", + "union":true }, - "InternalServerErrorException": { - "type": "structure", - "members": { - "message": { - "shape": "String" + "ListenerTlsFileCertificate":{ + "type":"structure", + "required":[ + "certificateChain", + "privateKey" + ], + "members":{ + "certificateChain":{ + "shape":"FilePath", + "documentation":"

The certificate chain for the certificate.

" + }, + "privateKey":{ + "shape":"FilePath", + "documentation":"

The private key for a certificate stored on the file system of the virtual node that the proxy is running on.

" } }, - "documentation": "

The request processing has failed because of an unknown error, exception, or\n failure.

", - "exception": true, - "error": { - "code": "InternalServerErrorException", - "httpStatusCode": 500, - "fault": true - } - }, - "HeaderName": { - "type": "string", - "min": 1, - "max": 50 - }, - "TagList": { - "type": "list", - "member": { - "shape": "TagRef" - }, - "min": 0, - "max": 50 + "documentation":"

An object that represents a local file certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).
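As an illustrative aside, a minimal sketch of how a file-based listener certificate might be assembled through the generated AWS SDK for Java v2 builders (class and member names follow the ListenerTlsFileCertificate and ListenerTlsCertificate shapes above; the paths are placeholders):

    import software.amazon.awssdk.services.appmesh.model.ListenerTlsCertificate;
    import software.amazon.awssdk.services.appmesh.model.ListenerTlsFileCertificate;

    // Certificate chain and private key live on the virtual node's file system (placeholder paths).
    ListenerTlsCertificate certificate = ListenerTlsCertificate.builder()
            .file(ListenerTlsFileCertificate.builder()
                    .certificateChain("/certs/cert_chain.pem")
                    .privateKey("/certs/private_key.pem")
                    .build())
            .build();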

" }, - "GrpcRetryPolicyEvent": { - "type": "string", - "enum": [ - "cancelled", - "deadline-exceeded", - "internal", - "resource-exhausted", - "unavailable" + "ListenerTlsMode":{ + "type":"string", + "enum":[ + "STRICT", + "PERMISSIVE", + "DISABLED" ] }, - "TlsValidationContextAcmTrust": { - "type": "structure", - "required": [ - "certificateAuthorityArns" - ], - "members": { - "certificateAuthorityArns": { - "shape": "CertificateAuthorityArns", - "documentation": "

One or more ACM Amazon Resource Name (ARN)s.

" - } - }, - "documentation": "

An object that represents a TLS validation context trust for an AWS Certificate Manager (ACM) certificate.

" - }, - "ForbiddenException": { - "type": "structure", - "members": { - "message": { - "shape": "String" - } - }, - "documentation": "

You don't have permissions to perform this action.

", - "exception": true, - "error": { - "code": "ForbiddenException", - "httpStatusCode": 403, - "senderFault": true - } + "Listeners":{ + "type":"list", + "member":{"shape":"Listener"}, + "max":1, + "min":0 }, - "HeaderMatchMethod": { - "type": "structure", - "members": { - "exact": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must match the specified value exactly.

" - }, - "prefix": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must begin with the specified characters.

" - }, - "range": { - "shape": "MatchRange", - "documentation": "

An object that represents the range of values to match on.

" - }, - "regex": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must include the specified characters.

" - }, - "suffix": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must end with the specified characters.

" + "Logging":{ + "type":"structure", + "members":{ + "accessLog":{ + "shape":"AccessLog", + "documentation":"

The access log configuration for a virtual node.

" } }, - "documentation": "

An object that represents the method and value to match with the header value sent in a\n request. Specify one match method.

" + "documentation":"

An object that represents the logging information for a virtual node.

" }, - "DeleteMeshOutput": { - "type": "structure", - "required": [ - "mesh" - ], - "members": { - "mesh": { - "shape": "MeshData", - "documentation": "

The service mesh that was deleted.

" - } - }, - "documentation": "", - "payload": "mesh" + "Long":{ + "type":"long", + "box":true }, - "VirtualGatewayClientPolicyTls": { - "type": "structure", - "required": [ - "validation" + "MatchRange":{ + "type":"structure", + "required":[ + "end", + "start" ], - "members": { - "enforce": { - "shape": "Boolean", - "box": true, - "documentation": "

Whether the policy is enforced. The default is True, if a value isn't\n specified.

" - }, - "ports": { - "shape": "PortSet", - "documentation": "

One or more ports that the policy is enforced for.

" + "members":{ + "end":{ + "shape":"Long", + "documentation":"

The end of the range.

" }, - "validation": { - "shape": "VirtualGatewayTlsValidationContext", - "documentation": "

A reference to an object that represents a TLS validation context.

" + "start":{ + "shape":"Long", + "documentation":"

The start of the range.

" } }, - "documentation": "

An object that represents a Transport Layer Security (TLS) client policy.

" + "documentation":"

An object that represents the range of values to match on. The start of the range is included in the range, but the end is not. For example, if the range specified were 1-100, only values 1-99 would be matched.
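For illustration, a hedged sketch of how this half-open range might be expressed with the generated AWS SDK for Java v2 builders (MatchRange and HeaderMatchMethod member names are taken from the shapes in this model; the bounds are examples):

    import software.amazon.awssdk.services.appmesh.model.HeaderMatchMethod;
    import software.amazon.awssdk.services.appmesh.model.MatchRange;

    // Matches header values 1 through 99; 100 is excluded because the end of the range is exclusive.
    HeaderMatchMethod match = HeaderMatchMethod.builder()
            .range(MatchRange.builder().start(1L).end(100L).build())
            .build();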

" }, - "EgressFilterType": { - "type": "string", - "enum": [ - "ALLOW_ALL", - "DROP_ALL" - ] + "MaxConnections":{ + "type":"integer", + "min":1 }, - "DurationValue": { - "type": "long", - "box": true, - "min": 0 + "MaxPendingRequests":{ + "type":"integer", + "min":1 }, - "Hostname": { - "type": "string" + "MaxRequests":{ + "type":"integer", + "min":1 }, - "VirtualGatewayStatus": { - "type": "structure", - "required": [ - "status" - ], - "members": { - "status": { - "shape": "VirtualGatewayStatusCode", - "documentation": "

The current status.

" - } - }, - "documentation": "

An object that represents the status of the mesh resource.

" + "MaxRetries":{ + "type":"long", + "box":true, + "min":0 }, - "GatewayRouteStatus": { - "type": "structure", - "required": [ + "MeshData":{ + "type":"structure", + "required":[ + "meshName", + "metadata", + "spec", "status" ], - "members": { - "status": { - "shape": "GatewayRouteStatusCode", - "documentation": "

The current status for the gateway route.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh.

" + }, + "metadata":{ + "shape":"ResourceMetadata", + "documentation":"

The associated metadata for the service mesh.

" + }, + "spec":{ + "shape":"MeshSpec", + "documentation":"

The associated specification for the service mesh.

" + }, + "status":{ + "shape":"MeshStatus", + "documentation":"

The status of the service mesh.

" } }, - "documentation": "

An object that represents the current status of a gateway route.

" + "documentation":"

An object that represents a service mesh returned by a describe operation.

" }, - "VirtualGatewayListeners": { - "type": "list", - "member": { - "shape": "VirtualGatewayListener" - }, - "min": 0, - "max": 1 + "MeshList":{ + "type":"list", + "member":{"shape":"MeshRef"} }, - "TagResourceInput": { - "type": "structure", - "required": [ - "resourceArn", - "tags" + "MeshRef":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "lastUpdatedAt", + "meshName", + "meshOwner", + "resourceOwner", + "version" ], - "members": { - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the resource to add tags to.

", - "location": "querystring", - "locationName": "resourceArn" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) of the service mesh.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" }, - "tags": { - "shape": "TagList", - "documentation": "

The tags to add to the resource. A tag is an array of key-value pairs.\n Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

" + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh.

" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + }, + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" } }, - "documentation": "" + "documentation":"

An object that represents a service mesh returned by a list operation.

" }, - "CreateVirtualGatewayOutput": { - "type": "structure", - "required": [ - "virtualGateway" - ], - "members": { - "virtualGateway": { - "shape": "VirtualGatewayData", - "documentation": "

The full description of your virtual gateway following the create call.

" + "MeshSpec":{ + "type":"structure", + "members":{ + "egressFilter":{ + "shape":"EgressFilter", + "documentation":"

The egress filter rules for the service mesh.

" } }, - "payload": "virtualGateway" - }, - "ListVirtualGatewaysOutput": { - "type": "structure", - "required": [ - "virtualGateways" - ], - "members": { - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListVirtualGateways\n request. When the results of a ListVirtualGateways request exceed\n limit, you can use this value to retrieve the next page of results. This\n value is null when there are no more results to return.

" - }, - "virtualGateways": { - "shape": "VirtualGatewayList", - "documentation": "

The list of existing virtual gateways for the specified service mesh.

" - } - } + "documentation":"

An object that represents the specification of a service mesh.
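As a sketch only, a mesh specification with an egress filter might look like the following in the generated AWS SDK for Java v2 (the EgressFilter shape and its type member are assumed from the wider App Mesh model; only MeshSpec.egressFilter appears above):

    import software.amazon.awssdk.services.appmesh.model.EgressFilter;
    import software.amazon.awssdk.services.appmesh.model.EgressFilterType;
    import software.amazon.awssdk.services.appmesh.model.MeshSpec;

    // DROP_ALL restricts egress to destinations defined in the mesh; ALLOW_ALL permits any destination.
    MeshSpec meshSpec = MeshSpec.builder()
            .egressFilter(EgressFilter.builder().type(EgressFilterType.DROP_ALL).build())
            .build();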

" }, - "VirtualGatewayTlsValidationContext": { - "type": "structure", - "required": [ - "trust" - ], - "members": { - "trust": { - "shape": "VirtualGatewayTlsValidationContextTrust", - "documentation": "

A reference to an object that represents a TLS validation context trust.

" + "MeshStatus":{ + "type":"structure", + "members":{ + "status":{ + "shape":"MeshStatusCode", + "documentation":"

The current mesh status.

" } }, - "documentation": "

An object that represents a Transport Layer Security (TLS) validation context.

" + "documentation":"

An object that represents the status of a service mesh.

" + }, + "MeshStatusCode":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "DELETED" + ] }, - "VirtualServiceProvider": { - "type": "structure", - "members": { - "virtualNode": { - "shape": "VirtualNodeServiceProvider", - "documentation": "

The virtual node associated with a virtual service.

" + "MethodName":{ + "type":"string", + "max":50, + "min":1 + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The specified resource doesn't exist. Check your request syntax and try again.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "OutlierDetection":{ + "type":"structure", + "required":[ + "baseEjectionDuration", + "interval", + "maxEjectionPercent", + "maxServerErrors" + ], + "members":{ + "baseEjectionDuration":{ + "shape":"Duration", + "documentation":"

The base amount of time for which a host is ejected.

" + }, + "interval":{ + "shape":"Duration", + "documentation":"

The time interval between ejection sweep analysis.

" + }, + "maxEjectionPercent":{ + "shape":"OutlierDetectionMaxEjectionPercent", + "documentation":"

Maximum percentage of hosts in load balancing pool for upstream service that can be ejected. Will eject at least one host regardless of the value.

" + }, + "maxServerErrors":{ + "shape":"OutlierDetectionMaxServerErrors", + "documentation":"

Number of consecutive 5xx errors required for ejection.

" + } + }, + "documentation":"

An object that represents the outlier detection for a virtual node's listener.
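A hedged sketch of an outlier-detection configuration built with the generated AWS SDK for Java v2 (the Duration shape and DurationUnit enum are assumed from the wider App Mesh model; the thresholds are arbitrary examples):

    import software.amazon.awssdk.services.appmesh.model.Duration;
    import software.amazon.awssdk.services.appmesh.model.DurationUnit;
    import software.amazon.awssdk.services.appmesh.model.OutlierDetection;

    // Eject a host after 5 consecutive 5xx errors, sweep every 10 seconds, eject for at least 30 seconds,
    // and never eject more than half of the load-balancing pool at once.
    OutlierDetection outlierDetection = OutlierDetection.builder()
            .maxServerErrors(5L)
            .interval(Duration.builder().unit(DurationUnit.S).value(10L).build())
            .baseEjectionDuration(Duration.builder().unit(DurationUnit.S).value(30L).build())
            .maxEjectionPercent(50)
            .build();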

" + }, + "OutlierDetectionMaxEjectionPercent":{ + "type":"integer", + "box":true, + "max":100, + "min":0 + }, + "OutlierDetectionMaxServerErrors":{ + "type":"long", + "box":true, + "min":1 + }, + "PercentInt":{ + "type":"integer", + "max":100, + "min":0 + }, + "PortMapping":{ + "type":"structure", + "required":[ + "port", + "protocol" + ], + "members":{ + "port":{ + "shape":"PortNumber", + "documentation":"

The port used for the port mapping.

" }, - "virtualRouter": { - "shape": "VirtualRouterServiceProvider", - "documentation": "

The virtual router associated with a virtual service.

" + "protocol":{ + "shape":"PortProtocol", + "documentation":"

The protocol used for the port mapping. Specify one protocol.

" } }, - "documentation": "

An object that represents the provider for a virtual service.

" + "documentation":"

An object that represents a port mapping.
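For illustration, a port mapping built with the generated AWS SDK for Java v2 builders (member names follow the PortMapping shape above; the port value is a placeholder):

    import software.amazon.awssdk.services.appmesh.model.PortMapping;
    import software.amazon.awssdk.services.appmesh.model.PortProtocol;

    PortMapping portMapping = PortMapping.builder()
            .port(8080)                   // PortNumber: 1-65535
            .protocol(PortProtocol.HTTP)  // one of http, tcp, http2, grpc
            .build();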

" }, - "GrpcRouteMatch": { - "type": "structure", - "members": { - "metadata": { - "shape": "GrpcRouteMetadataList", - "documentation": "

An object that represents the data to match from the request.

" - }, - "methodName": { - "shape": "MethodName", - "documentation": "

The method name to match from the request. If you specify a name, you must also specify\n a serviceName.

" - }, - "serviceName": { - "shape": "ServiceName", - "documentation": "

The fully qualified domain name for the service to match from the request.

" - } + "PortNumber":{ + "type":"integer", + "max":65535, + "min":1 + }, + "PortProtocol":{ + "type":"string", + "enum":[ + "http", + "tcp", + "http2", + "grpc" + ] + }, + "PortSet":{ + "type":"list", + "member":{"shape":"PortNumber"} + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} }, - "documentation": "

An object that represents the criteria for determining a request match.

" + "documentation":"

You can't delete the specified resource because it's in use or required by another resource.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true }, - "AwsCloudMapServiceDiscovery": { - "type": "structure", - "required": [ - "namespaceName", - "serviceName" + "ResourceMetadata":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "lastUpdatedAt", + "meshOwner", + "resourceOwner", + "uid", + "version" ], - "members": { - "attributes": { - "shape": "AwsCloudMapInstanceAttributes", - "documentation": "

A string map that contains attributes with values that you can use to filter instances\n by any custom attribute that you specified when you registered the instance. Only instances\n that match all of the specified key/value pairs will be returned.

" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) for the resource.

" }, - "namespaceName": { - "shape": "AwsCloudMapName", - "documentation": "

The name of the AWS Cloud Map namespace to use.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" }, - "serviceName": { - "shape": "AwsCloudMapName", - "documentation": "

The name of the AWS Cloud Map service to use.

" - } - }, - "documentation": "

An object that represents the AWS Cloud Map service discovery information for your virtual\n node.

" - }, - "UpdateVirtualServiceOutput": { - "type": "structure", - "required": [ - "virtualService" - ], - "members": { - "virtualService": { - "shape": "VirtualServiceData", - "documentation": "

A full description of the virtual service that was updated.

" + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + }, + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "uid":{ + "shape":"String", + "documentation":"

The unique identifier for the resource.

" + }, + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" } }, - "documentation": "", - "payload": "virtualService" + "documentation":"

An object that represents metadata for a resource.

" }, - "MeshStatus": { - "type": "structure", - "members": { - "status": { - "shape": "MeshStatusCode", - "documentation": "

The current mesh status.

" - } - }, - "documentation": "

An object that represents the status of a service mesh.

" + "ResourceName":{ + "type":"string", + "max":255, + "min":1 }, - "CreateVirtualNodeInput": { - "type": "structure", - "required": [ + "RouteData":{ + "type":"structure", + "required":[ "meshName", + "metadata", + "routeName", "spec", - "virtualNodeName" + "status", + "virtualRouterName" ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to create the virtual node in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualNodeSpec", - "documentation": "

The virtual node specification to apply.

" - }, - "tags": { - "shape": "TagList", - "documentation": "

Optional metadata that you can apply to the virtual node to assist with categorization\n and organization. Each tag consists of a key and an optional value, both of which you\n define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

", - "tags": [ - "not-preview" - ] - }, - "virtualNodeName": { - "shape": "ResourceName", - "documentation": "

The name to use for the virtual node.

" - } - }, - "documentation": "" - }, - "NotFoundException": { - "type": "structure", - "members": { - "message": { - "shape": "String" - } - }, - "documentation": "

The specified resource doesn't exist. Check your request syntax and try again.

", - "exception": true, - "error": { - "code": "NotFoundException", - "httpStatusCode": 404, - "senderFault": true - } - }, - "RouteSpec": { - "type": "structure", - "members": { - "grpcRoute": { - "shape": "GrpcRoute", - "documentation": "

An object that represents the specification of a gRPC route.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the route resides in.

" + }, + "metadata":{ + "shape":"ResourceMetadata", + "documentation":"

The associated metadata for the route.

" }, - "http2Route": { - "shape": "HttpRoute", - "documentation": "

An object that represents the specification of an HTTP/2 route.

" + "routeName":{ + "shape":"ResourceName", + "documentation":"

The name of the route.

" }, - "httpRoute": { - "shape": "HttpRoute", - "documentation": "

An object that represents the specification of an HTTP route.

" + "spec":{ + "shape":"RouteSpec", + "documentation":"

The specifications of the route.

" }, - "priority": { - "shape": "RoutePriority", - "documentation": "

The priority for the route. Routes are matched based on the specified value, where 0 is\n the highest priority.

" + "status":{ + "shape":"RouteStatus", + "documentation":"

The status of the route.

" }, - "tcpRoute": { - "shape": "TcpRoute", - "documentation": "

An object that represents the specification of a TCP route.

" + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The virtual router that the route is associated with.

" } }, - "documentation": "

An object that represents a route specification. Specify one route type.

" + "documentation":"

An object that represents a route returned by a describe operation.

" + }, + "RouteList":{ + "type":"list", + "member":{"shape":"RouteRef"} + }, + "RoutePriority":{ + "type":"integer", + "box":true, + "max":1000, + "min":0 }, - "GatewayRouteRef": { - "type": "structure", - "required": [ + "RouteRef":{ + "type":"structure", + "required":[ "arn", "createdAt", - "gatewayRouteName", "lastUpdatedAt", "meshName", "meshOwner", "resourceOwner", + "routeName", "version", - "virtualGatewayName" + "virtualRouterName" ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) for the gateway route.

" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) for the route.

" }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" }, - "gatewayRouteName": { - "shape": "ResourceName", - "documentation": "

The name of the gateway route.

" + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the route resides in.

" }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the resource resides in.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + "routeName":{ + "shape":"ResourceName", + "documentation":"

The name of the route.

" }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The virtual gateway that the gateway route is associated with.

" + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The virtual router that the route is associated with.

" } }, - "documentation": "

An object that represents a gateway route returned by a list operation.

" + "documentation":"

An object that represents a route returned by a list operation.

" }, - "VirtualGatewayListenerTlsAcmCertificate": { - "type": "structure", - "required": [ - "certificateArn" - ], - "members": { - "certificateArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) for the certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" + "RouteSpec":{ + "type":"structure", + "members":{ + "grpcRoute":{ + "shape":"GrpcRoute", + "documentation":"

An object that represents the specification of a gRPC route.

" + }, + "http2Route":{ + "shape":"HttpRoute", + "documentation":"

An object that represents the specification of an HTTP/2 route.

" + }, + "httpRoute":{ + "shape":"HttpRoute", + "documentation":"

An object that represents the specification of an HTTP route.

" + }, + "priority":{ + "shape":"RoutePriority", + "documentation":"

The priority for the route. Routes are matched based on the specified value, where 0 is the highest priority.

" + }, + "tcpRoute":{ + "shape":"TcpRoute", + "documentation":"

An object that represents the specification of a TCP route.

" } }, - "documentation": "

An object that represents an AWS Certificate Manager (ACM) certificate.

" + "documentation":"

An object that represents a route specification. Specify one route type.

" }, - "ListGatewayRoutesOutput": { - "type": "structure", - "required": [ - "gatewayRoutes" - ], - "members": { - "gatewayRoutes": { - "shape": "GatewayRouteList", - "documentation": "

The list of existing gateway routes for the specified service mesh and virtual\n gateway.

" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListGatewayRoutes\n request. When the results of a ListGatewayRoutes request exceed\n limit, you can use this value to retrieve the next page of results. This\n value is null when there are no more results to return.

" + "RouteStatus":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"RouteStatusCode", + "documentation":"

The current status for the route.

" } - } + }, + "documentation":"

An object that represents the current status of a route.

" + }, + "RouteStatusCode":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "DELETED" + ] }, - "CreateVirtualServiceOutput": { - "type": "structure", - "required": [ - "virtualService" + "ServiceDiscovery":{ + "type":"structure", + "members":{ + "awsCloudMap":{ + "shape":"AwsCloudMapServiceDiscovery", + "documentation":"

Specifies any AWS Cloud Map information for the virtual node.

" + }, + "dns":{ + "shape":"DnsServiceDiscovery", + "documentation":"

Specifies the DNS information for the virtual node.

" + } + }, + "documentation":"

An object that represents the service discovery information for a virtual node.

", + "union":true + }, + "ServiceName":{"type":"string"}, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request has failed due to a temporary failure of the service.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"TagRef"}, + "max":50, + "min":0 + }, + "TagRef":{ + "type":"structure", + "required":[ + "key", + "value" ], - "members": { - "virtualService": { - "shape": "VirtualServiceData", - "documentation": "

The full description of your virtual service following the create call.

" + "members":{ + "key":{ + "shape":"TagKey", + "documentation":"

One part of a key-value pair that make up a tag. A key is a general label that acts like a category for more specific tag values.

" + }, + "value":{ + "shape":"TagValue", + "documentation":"

The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).

" } }, - "documentation": "", - "payload": "virtualService" + "documentation":"

Optional metadata that you apply to a resource to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
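As a sketch, tagging a mesh resource through the generated AWS SDK for Java v2 client might look like this (TagRef and TagResourceInput member names come from the shapes above; the client call, resource ARN, and tag values are placeholders):

    import software.amazon.awssdk.services.appmesh.AppMeshClient;
    import software.amazon.awssdk.services.appmesh.model.TagRef;
    import software.amazon.awssdk.services.appmesh.model.TagResourceRequest;

    AppMeshClient appMesh = AppMeshClient.create();
    String resourceArn = "arn:aws:appmesh:us-west-2:111122223333:mesh/my-mesh"; // placeholder ARN
    appMesh.tagResource(TagResourceRequest.builder()
            .resourceArn(resourceArn)
            .tags(TagRef.builder().key("team").value("payments").build())
            .build());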

" }, - "FileAccessLog": { - "type": "structure", - "required": [ - "path" + "TagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" ], - "members": { - "path": { - "shape": "FilePath", - "documentation": "

The file path to write access logs to. You can use /dev/stdout to send access logs to standard out and configure your Envoy container to use a log driver, such as awslogs, to export the access logs to a log storage service such as Amazon CloudWatch Logs. You can also specify a path in the Envoy container's file system to write the files to disk.

Note: The Envoy process must have write permissions to the path that you specify here. Otherwise, Envoy fails to bootstrap properly.
" + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to add tags to.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } }, - "documentation": "

An object that represents an access log file.
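A minimal sketch of the corresponding logging configuration in the generated AWS SDK for Java v2 (the AccessLog union and its file member are assumed from the wider App Mesh model; Logging and FileAccessLog appear above):

    import software.amazon.awssdk.services.appmesh.model.AccessLog;
    import software.amazon.awssdk.services.appmesh.model.FileAccessLog;
    import software.amazon.awssdk.services.appmesh.model.Logging;

    // Send Envoy access logs to stdout so a container log driver (for example, awslogs) can ship them.
    Logging logging = Logging.builder()
            .accessLog(AccessLog.builder()
                    .file(FileAccessLog.builder().path("/dev/stdout").build())
                    .build())
            .build();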

" + "documentation":"" }, - "VirtualRouterServiceProvider": { - "type": "structure", - "required": [ - "virtualRouterName" - ], - "members": { - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router that is acting as a service provider.

" - } + "TagResourceOutput":{ + "type":"structure", + "members":{ }, - "documentation": "

An object that represents a virtual node service provider.

" + "documentation":"" + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TagsLimit":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, + "TcpRetryPolicyEvent":{ + "type":"string", + "enum":["connection-error"] }, - "HttpTimeout": { - "type": "structure", - "members": { - "idle": { - "shape": "Duration" + "TcpRetryPolicyEvents":{ + "type":"list", + "member":{"shape":"TcpRetryPolicyEvent"}, + "max":1, + "min":1 + }, + "TcpRoute":{ + "type":"structure", + "required":["action"], + "members":{ + "action":{ + "shape":"TcpRouteAction", + "documentation":"

The action to take if a match is determined.

" }, - "perRequest": { - "shape": "Duration" + "timeout":{ + "shape":"TcpTimeout", + "documentation":"

An object that represents types of timeouts.

" } }, - "documentation": "

An object that represents types of timeouts.

" + "documentation":"

An object that represents a TCP route type.

" }, - "DeleteVirtualServiceInput": { - "type": "structure", - "required": [ - "meshName", - "virtualServiceName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to delete the virtual service in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualServiceName": { - "shape": "ServiceName", - "documentation": "

The name of the virtual service to delete.

", - "location": "uri", - "locationName": "virtualServiceName" - } - }, - "documentation": "" - }, - "TlsValidationContext": { - "type": "structure", - "required": [ - "trust" - ], - "members": { - "trust": { - "shape": "TlsValidationContextTrust", - "documentation": "

A reference to an object that represents a TLS validation context trust.

" + "TcpRouteAction":{ + "type":"structure", + "required":["weightedTargets"], + "members":{ + "weightedTargets":{ + "shape":"WeightedTargets", + "documentation":"

An object that represents the targets that traffic is routed to when a request matches the route.

" } }, - "documentation": "

An object that represents a Transport Layer Security (TLS) validation context.

" - }, - "GatewayRouteStatusCode": { - "type": "string", - "enum": [ - "ACTIVE", - "DELETED", - "INACTIVE" - ] + "documentation":"

An object that represents the action to take if a match is determined.

" }, - "DeleteVirtualRouterOutput": { - "type": "structure", - "required": [ - "virtualRouter" - ], - "members": { - "virtualRouter": { - "shape": "VirtualRouterData", - "documentation": "

The virtual router that was deleted.

" + "TcpTimeout":{ + "type":"structure", + "members":{ + "idle":{ + "shape":"Duration", + "documentation":"

An object that represents an idle timeout. An idle timeout bounds the amount of time that a connection may be idle. The default value is none.

" } }, - "documentation": "", - "payload": "virtualRouter" + "documentation":"

An object that represents types of timeouts.

" }, - "DescribeVirtualGatewayInput": { - "type": "structure", - "required": [ - "meshName", - "virtualGatewayName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the gateway route resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway to describe.

", - "location": "uri", - "locationName": "virtualGatewayName" + "Timestamp":{"type":"timestamp"}, + "TlsValidationContext":{ + "type":"structure", + "required":["trust"], + "members":{ + "trust":{ + "shape":"TlsValidationContextTrust", + "documentation":"

A reference to an object that represents a TLS validation context trust.

" } - } - }, - "TagsLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 50 + }, + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context.

" }, - "GrpcGatewayRouteAction": { - "type": "structure", - "required": [ - "target" - ], - "members": { - "target": { - "shape": "GatewayRouteTarget", - "documentation": "

An object that represents the target that traffic is routed to when a request matches the gateway route.

" + "TlsValidationContextAcmTrust":{ + "type":"structure", + "required":["certificateAuthorityArns"], + "members":{ + "certificateAuthorityArns":{ + "shape":"CertificateAuthorityArns", + "documentation":"

One or more ACM Amazon Resource Name (ARN)s.

" } }, - "documentation": "

An object that represents the action to take if a match is determined.

" + "documentation":"

An object that represents a TLS validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, - "DeleteVirtualNodeOutput": { - "type": "structure", - "required": [ - "virtualNode" - ], - "members": { - "virtualNode": { - "shape": "VirtualNodeData", - "documentation": "

The virtual node that was deleted.

" + "TlsValidationContextFileTrust":{ + "type":"structure", + "required":["certificateChain"], + "members":{ + "certificateChain":{ + "shape":"FilePath", + "documentation":"

The certificate trust chain for a certificate stored on the file system of the virtual node that the proxy is running on.

" } }, - "documentation": "", - "payload": "virtualNode" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" }, - "UpdateVirtualNodeInput": { - "type": "structure", - "required": [ - "meshName", - "spec", - "virtualNodeName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual node resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualNodeSpec", - "documentation": "

The new virtual node specification to apply. This overwrites the existing data.

" - }, - "virtualNodeName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual node to update.

", - "location": "uri", - "locationName": "virtualNodeName" - } - }, - "documentation": "" - }, - "ListenerTls": { - "type": "structure", - "required": [ - "certificate", - "mode" - ], - "members": { - "certificate": { - "shape": "ListenerTlsCertificate", - "documentation": "

A reference to an object that represents a listener's TLS certificate.

" + "TlsValidationContextTrust":{ + "type":"structure", + "members":{ + "acm":{ + "shape":"TlsValidationContextAcmTrust", + "documentation":"

A reference to an object that represents a TLS validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, - "mode": { - "shape": "ListenerTlsMode", - "documentation": "

Specify one of the following modes.

  • STRICT – Listener only accepts connections with TLS enabled.

  • PERMISSIVE – Listener accepts connections with or without TLS enabled.

  • DISABLED – Listener only accepts connections without TLS.
" + "file":{ + "shape":"TlsValidationContextFileTrust", + "documentation":"

An object that represents a TLS validation context trust for a local file.

" } }, - "documentation": "

An object that represents the Transport Layer Security (TLS) properties for a listener.
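For illustration, a listener TLS block in STRICT mode backed by an ACM certificate, built with the generated AWS SDK for Java v2 (names follow the ListenerTls, ListenerTlsCertificate, and ListenerTlsAcmCertificate shapes in this model; the certificate ARN is a placeholder):

    import software.amazon.awssdk.services.appmesh.model.ListenerTls;
    import software.amazon.awssdk.services.appmesh.model.ListenerTlsAcmCertificate;
    import software.amazon.awssdk.services.appmesh.model.ListenerTlsCertificate;
    import software.amazon.awssdk.services.appmesh.model.ListenerTlsMode;

    // STRICT: the listener only accepts TLS connections.
    ListenerTls listenerTls = ListenerTls.builder()
            .mode(ListenerTlsMode.STRICT)
            .certificate(ListenerTlsCertificate.builder()
                    .acm(ListenerTlsAcmCertificate.builder()
                            .certificateArn("arn:aws:acm:us-west-2:111122223333:certificate/example") // placeholder
                            .build())
                    .build())
            .build();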

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust.

", + "union":true }, - "DeleteMeshInput": { - "type": "structure", - "required": [ - "meshName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to delete.

", - "location": "uri", - "locationName": "meshName" - } + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} }, - "documentation": "" + "documentation":"

The maximum request rate permitted by the App Mesh APIs has been exceeded for your account. For best results, use an increasing or variable sleep interval between requests.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} }, - "TcpRetryPolicyEvents": { - "type": "list", - "member": { - "shape": "TcpRetryPolicyEvent" + "TooManyTagsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request exceeds the maximum allowed number of tags per resource. The current limit is 50 user tags per resource. You must reduce the number of tags in the request. None of the tags in this request were applied.

", + "error":{ + "httpStatusCode":400, + "senderFault":true }, - "min": 1, - "max": 1 + "exception":true }, - "CreateVirtualServiceInput": { - "type": "structure", - "required": [ - "meshName", - "spec", - "virtualServiceName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to create the virtual service in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualServiceSpec", - "documentation": "

The virtual service specification to apply.

" - }, - "tags": { - "shape": "TagList", - "documentation": "

Optional metadata that you can apply to the virtual service to assist with\n categorization and organization. Each tag consists of a key and an optional value, both of\n which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

", - "tags": [ - "not-preview" - ] - }, - "virtualServiceName": { - "shape": "ServiceName", - "documentation": "

The name to use for the virtual service.

" - } - }, - "documentation": "" - }, - "UpdateVirtualRouterInput": { - "type": "structure", - "required": [ - "meshName", - "spec", - "virtualRouterName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual router resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualRouterSpec", - "documentation": "

The new virtual router specification to apply. This overwrites the existing data.

" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router to update.

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "HttpGatewayRouteAction": { - "type": "structure", - "required": [ - "target" + "UntagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" ], - "members": { - "target": { - "shape": "GatewayRouteTarget", - "documentation": "

An object that represents the target that traffic is routed to when a request matches the gateway route.

" + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to delete tags from.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The keys of the tags to be removed.

" } }, - "documentation": "

An object that represents the action to take if a match is determined.

" + "documentation":"" }, - "GrpcGatewayRouteMatch": { - "type": "structure", - "members": { - "serviceName": { - "shape": "ServiceName", - "documentation": "

The fully qualified domain name for the service to match from the request.

" - } + "UntagResourceOutput":{ + "type":"structure", + "members":{ }, - "documentation": "

An object that represents the criteria for determining a request match.

" + "documentation":"" }, - "ListTagsForResourceInput": { - "type": "structure", - "required": [ - "resourceArn" + "UpdateGatewayRouteInput":{ + "type":"structure", + "required":[ + "gatewayRouteName", + "meshName", + "spec", + "virtualGatewayName" ], - "members": { - "limit": { - "shape": "TagsLimit", - "documentation": "

The maximum number of tag results returned by ListTagsForResource in\n paginated output. When this parameter is used, ListTagsForResource returns\n only limit results in a single page along with a nextToken\n response element. You can see the remaining results of the initial request by sending\n another ListTagsForResource request with the returned nextToken\n value. This value can be between 1 and 100. If you don't use\n this parameter, ListTagsForResource returns up to 100\n results and a nextToken value if applicable.

", - "location": "querystring", - "locationName": "limit" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated\n ListTagsForResource request where limit was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken value.

", - "location": "querystring", - "locationName": "nextToken" - }, - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) that identifies the resource to list the tags for.
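As a sketch of the limit/nextToken pagination described above, using the generated AWS SDK for Java v2 client (the ListTagsForResource response accessors are assumed to mirror the output shape; the ARN is a placeholder):

    import software.amazon.awssdk.services.appmesh.AppMeshClient;
    import software.amazon.awssdk.services.appmesh.model.ListTagsForResourceRequest;
    import software.amazon.awssdk.services.appmesh.model.ListTagsForResourceResponse;

    AppMeshClient appMesh = AppMeshClient.create();
    String resourceArn = "arn:aws:appmesh:us-west-2:111122223333:mesh/my-mesh"; // placeholder ARN
    String nextToken = null;
    do {
        ListTagsForResourceResponse page = appMesh.listTagsForResource(ListTagsForResourceRequest.builder()
                .resourceArn(resourceArn)
                .limit(10)            // between 1 and 100; up to 100 results when omitted
                .nextToken(nextToken)
                .build());
        page.tags().forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
        nextToken = page.nextToken();
    } while (nextToken != null);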

", - "location": "querystring", - "locationName": "resourceArn" - } - }, - "documentation": "" - }, - "GrpcRetryPolicyEvents": { - "type": "list", - "member": { - "shape": "GrpcRetryPolicyEvent" - }, - "min": 1, - "max": 5 - }, - "VirtualGatewayStatusCode": { - "type": "string", - "enum": [ - "ACTIVE", - "DELETED", - "INACTIVE" - ] - }, - "ServiceUnavailableException": { - "type": "structure", - "members": { - "message": { - "shape": "String" + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true + }, + "gatewayRouteName":{ + "shape":"ResourceName", + "documentation":"

The name of the gateway route to update.

", + "location":"uri", + "locationName":"gatewayRouteName" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the gateway route resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "spec":{ + "shape":"GatewayRouteSpec", + "documentation":"

The new gateway route specification to apply. This overwrites the existing data.

" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway that the gateway route is associated with.

", + "location":"uri", + "locationName":"virtualGatewayName" } - }, - "documentation": "

The request has failed due to a temporary failure of the service.

", - "exception": true, - "error": { - "code": "ServiceUnavailableException", - "httpStatusCode": 503, - "fault": true } }, - "DescribeMeshOutput": { - "type": "structure", - "required": [ - "mesh" - ], - "members": { - "mesh": { - "shape": "MeshData", - "documentation": "

The full description of your service mesh.

" + "UpdateGatewayRouteOutput":{ + "type":"structure", + "required":["gatewayRoute"], + "members":{ + "gatewayRoute":{ + "shape":"GatewayRouteData", + "documentation":"

A full description of the gateway route that was updated.

" } }, - "documentation": "", - "payload": "mesh" + "payload":"gatewayRoute" }, - "DeleteVirtualRouterInput": { - "type": "structure", - "required": [ - "meshName", - "virtualRouterName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to delete the virtual router in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router to delete.

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "UpdateGatewayRouteOutput": { - "type": "structure", - "required": [ - "gatewayRoute" - ], - "members": { - "gatewayRoute": { - "shape": "GatewayRouteData", - "documentation": "

A full description of the gateway route that was updated.

" + "UpdateMeshInput":{ + "type":"structure", + "required":["meshName"], + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh to update.

", + "location":"uri", + "locationName":"meshName" + }, + "spec":{ + "shape":"MeshSpec", + "documentation":"

The service mesh specification to apply.

" } }, - "payload": "gatewayRoute" + "documentation":"" + }, + "UpdateMeshOutput":{ + "type":"structure", + "required":["mesh"], + "members":{ + "mesh":{"shape":"MeshData"} + }, + "documentation":"", + "payload":"mesh" }, - "DescribeRouteInput": { - "type": "structure", - "required": [ + "UpdateRouteInput":{ + "type":"structure", + "required":[ "meshName", "routeName", + "spec", "virtualRouterName" ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the route resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "routeName": { - "shape": "ResourceName", - "documentation": "

The name of the route to describe.

", - "location": "uri", - "locationName": "routeName" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router that the route is associated with.

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "DeleteRouteOutput": { - "type": "structure", - "required": [ - "route" + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the route resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "routeName":{ + "shape":"ResourceName", + "documentation":"

The name of the route to update.

", + "location":"uri", + "locationName":"routeName" + }, + "spec":{ + "shape":"RouteSpec", + "documentation":"

The new route specification to apply. This overwrites the existing data.

" + }, + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router that the route is associated with.

", + "location":"uri", + "locationName":"virtualRouterName" + } + }, + "documentation":"" + }, + "UpdateRouteOutput":{ + "type":"structure", + "required":["route"], + "members":{ + "route":{ + "shape":"RouteData", + "documentation":"

A full description of the route that was updated.
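A hedged sketch of an UpdateRoute call with the generated AWS SDK for Java v2 client; the new spec overwrites the existing route data. RouteSpec and its priority come from the shapes above, while the HttpRoute, HttpRouteMatch, HttpRouteAction, and WeightedTarget member names are assumed from the wider App Mesh model, and all names and values are placeholders:

    import software.amazon.awssdk.services.appmesh.AppMeshClient;
    import software.amazon.awssdk.services.appmesh.model.*;

    AppMeshClient appMesh = AppMeshClient.create();
    appMesh.updateRoute(UpdateRouteRequest.builder()
            .meshName("my-mesh")
            .virtualRouterName("my-router")
            .routeName("my-route")
            .spec(RouteSpec.builder()
                    .priority(0) // 0 is the highest priority, matched first
                    .httpRoute(HttpRoute.builder()
                            .match(HttpRouteMatch.builder().prefix("/").build())
                            .action(HttpRouteAction.builder()
                                    .weightedTargets(WeightedTarget.builder()
                                            .virtualNode("my-virtual-node")
                                            .weight(100)
                                            .build())
                                    .build())
                            .build())
                    .build())
            .build());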

" + } + }, + "documentation":"", + "payload":"route" + }, + "UpdateVirtualGatewayInput":{ + "type":"structure", + "required":[ + "meshName", + "spec", + "virtualGatewayName" ], - "members": { - "route": { - "shape": "RouteData", - "documentation": "

The route that was deleted.

" + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual gateway resides in.

", + "location":"uri", + "locationName":"meshName" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" + }, + "spec":{ + "shape":"VirtualGatewaySpec", + "documentation":"

The new virtual gateway specification to apply. This overwrites the existing data.

" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway to update.

", + "location":"uri", + "locationName":"virtualGatewayName" } - }, - "documentation": "", - "payload": "route" - }, - "Listeners": { - "type": "list", - "member": { - "shape": "Listener" - }, - "min": 0, - "max": 1 - }, - "Backends": { - "type": "list", - "member": { - "shape": "Backend" } }, - "PortProtocol": { - "type": "string", - "enum": [ - "grpc", - "http", - "http2", - "tcp" - ] - }, - "DeleteGatewayRouteOutput": { - "type": "structure", - "required": [ - "gatewayRoute" - ], - "members": { - "gatewayRoute": { - "shape": "GatewayRouteData", - "documentation": "

The gateway route that was deleted.

" + "UpdateVirtualGatewayOutput":{ + "type":"structure", + "required":["virtualGateway"], + "members":{ + "virtualGateway":{ + "shape":"VirtualGatewayData", + "documentation":"

A full description of the virtual gateway that was updated.

" } }, - "payload": "gatewayRoute" - }, - "VirtualGatewayList": { - "type": "list", - "member": { - "shape": "VirtualGatewayRef" - } - }, - "VirtualNodeStatusCode": { - "type": "string", - "enum": [ - "ACTIVE", - "DELETED", - "INACTIVE" - ] - }, - "ServiceName": { - "type": "string" + "payload":"virtualGateway" }, - "UpdateVirtualServiceInput": { - "type": "structure", - "required": [ + "UpdateVirtualNodeInput":{ + "type":"structure", + "required":[ "meshName", "spec", - "virtualServiceName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual service resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "VirtualServiceSpec", - "documentation": "

The new virtual service specification to apply. This overwrites the existing\n data.

" - }, - "virtualServiceName": { - "shape": "ServiceName", - "documentation": "

The name of the virtual service to update.

", - "location": "uri", - "locationName": "virtualServiceName" - } - }, - "documentation": "" - }, - "HealthCheckThreshold": { - "type": "integer", - "min": 2, - "max": 10 - }, - "UpdateRouteOutput": { - "type": "structure", - "required": [ - "route" - ], - "members": { - "route": { - "shape": "RouteData", - "documentation": "

A full description of the route that was updated.

" - } - }, - "documentation": "", - "payload": "route" - }, - "PercentInt": { - "type": "integer", - "min": 0, - "max": 100 - }, - "MethodName": { - "type": "string", - "min": 1, - "max": 50 - }, - "TagValue": { - "type": "string", - "min": 0, - "max": 256 - }, - "HttpRouteAction": { - "type": "structure", - "required": [ - "weightedTargets" - ], - "members": { - "weightedTargets": { - "shape": "WeightedTargets", - "documentation": "

An object that represents the targets that traffic is routed to when a request matches the route.

" - } - }, - "documentation": "

An object that represents the action to take if a match is determined.

" - }, - "ListRoutesInput": { - "type": "structure", - "required": [ - "meshName", - "virtualRouterName" - ], - "members": { - "limit": { - "shape": "ListRoutesLimit", - "documentation": "

The maximum number of results returned by ListRoutes in paginated output.\n When you use this parameter, ListRoutes returns only limit\n results in a single page along with a nextToken response element. You can see\n the remaining results of the initial request by sending another ListRoutes\n request with the returned nextToken value. This value can be between\n 1 and 100. If you don't use this parameter,\n ListRoutes returns up to 100 results and a\n nextToken value if applicable.

", - "location": "querystring", - "locationName": "limit" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to list routes in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated\n ListRoutes request where limit was used and the results\n exceeded the value of that parameter. Pagination continues from the end of the previous\n results that returned the nextToken value.

", - "location": "querystring", - "locationName": "nextToken" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router to list routes in.
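
As a hedged illustration of the limit/nextToken paging contract described above, a manual pagination loop with the AWS SDK for Java v2. The mesh and router names are placeholders, and routes() / nextToken() are assumed to be the generated accessors on the list output.

```java
import software.amazon.awssdk.services.appmesh.AppMeshClient;
import software.amazon.awssdk.services.appmesh.model.ListRoutesRequest;
import software.amazon.awssdk.services.appmesh.model.ListRoutesResponse;

public class ListRoutesPagesSketch {
    public static void main(String[] args) {
        try (AppMeshClient appMesh = AppMeshClient.create()) {
            String nextToken = null;
            do {
                ListRoutesResponse page = appMesh.listRoutes(ListRoutesRequest.builder()
                        .meshName("my-mesh")              // placeholder
                        .virtualRouterName("my-router")   // placeholder
                        .limit(50)                        // 1-100; up to 100 per page if omitted
                        .nextToken(nextToken)             // null on the first request
                        .build());
                page.routes().forEach(route -> System.out.println(route.routeName()));
                nextToken = page.nextToken();             // null once the last page is reached
            } while (nextToken != null);
        }
    }
}
```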

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "VirtualServiceRef": { - "type": "structure", - "required": [ - "arn", - "createdAt", - "lastUpdatedAt", - "meshName", - "meshOwner", - "resourceOwner", - "version", - "virtualServiceName" + "virtualNodeName" ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) for the virtual service.

" - }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual node resides in.

", + "location":"uri", + "locationName":"meshName" }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual service resides in.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" - }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" - }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" - }, - "virtualServiceName": { - "shape": "ServiceName", - "documentation": "

The name of the virtual service.

" - } - }, - "documentation": "

An object that represents a virtual service returned by a list operation.

" - }, - "GrpcTimeout": { - "type": "structure", - "members": { - "idle": { - "shape": "Duration", - "documentation": "

An object that represents an idle timeout. An idle timeout bounds the amount of time that a connection may be idle. The default value is none.

" + "spec":{ + "shape":"VirtualNodeSpec", + "documentation":"

The new virtual node specification to apply. This overwrites the existing data.

" }, - "perRequest": { - "shape": "Duration", - "documentation": "

An object that represents a per request timeout. The default value is 15 seconds. If you set a higher timeout, then make sure that the higher value is set for each App Mesh resource in a conversation. For example, if a virtual node backend uses a virtual router provider to route to another virtual node, then the timeout should be greater than 15 seconds for the source and destination virtual node and the route.

" - } - }, - "documentation": "

An object that represents types of timeouts.
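
A small sketch, assuming the generated App Mesh model classes, of raising the per-request timeout beyond the 15-second default described above; the 30-second value is illustrative only.

```java
import software.amazon.awssdk.services.appmesh.model.Duration;
import software.amazon.awssdk.services.appmesh.model.DurationUnit;
import software.amazon.awssdk.services.appmesh.model.GrpcTimeout;

class GrpcTimeoutSketch {
    // Note: Duration here is the App Mesh model shape, not java.time.Duration.
    static GrpcTimeout thirtySecondPerRequestTimeout() {
        return GrpcTimeout.builder()
                .perRequest(Duration.builder().unit(DurationUnit.S).value(30L).build())
                .build();
    }
}
```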

" - }, - "VirtualNodeStatus": { - "type": "structure", - "required": [ - "status" - ], - "members": { - "status": { - "shape": "VirtualNodeStatusCode", - "documentation": "

The current status of the virtual node.

" + "virtualNodeName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual node to update.

", + "location":"uri", + "locationName":"virtualNodeName" } }, - "documentation": "

An object that represents the current status of the virtual node.

" + "documentation":"" }, - "VirtualRouterRef": { - "type": "structure", - "required": [ - "arn", - "createdAt", - "lastUpdatedAt", - "meshName", - "meshOwner", - "resourceOwner", - "version", - "virtualRouterName" - ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) for the virtual router.

" - }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" - }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual router resides in.

" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" - }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" - }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router.

" + "UpdateVirtualNodeOutput":{ + "type":"structure", + "required":["virtualNode"], + "members":{ + "virtualNode":{ + "shape":"VirtualNodeData", + "documentation":"

A full description of the virtual node that was updated.

" } }, - "documentation": "

An object that represents a virtual router returned by a list operation.

" + "documentation":"", + "payload":"virtualNode" }, - "VirtualServiceData": { - "type": "structure", - "required": [ + "UpdateVirtualRouterInput":{ + "type":"structure", + "required":[ "meshName", - "metadata", "spec", - "status", - "virtualServiceName" + "virtualRouterName" ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual service resides in.

" + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - "metadata": { - "shape": "ResourceMetadata" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual router resides in.

", + "location":"uri", + "locationName":"meshName" }, - "spec": { - "shape": "VirtualServiceSpec", - "documentation": "

The specifications of the virtual service.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - "status": { - "shape": "VirtualServiceStatus", - "documentation": "

The current status of the virtual service.

" + "spec":{ + "shape":"VirtualRouterSpec", + "documentation":"

The new virtual router specification to apply. This overwrites the existing data.

" }, - "virtualServiceName": { - "shape": "ServiceName", - "documentation": "

The name of the virtual service.

" + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router to update.

", + "location":"uri", + "locationName":"virtualRouterName" } }, - "documentation": "

An object that represents a virtual service returned by a describe operation.

" + "documentation":"" }, - "HttpRouteHeader": { - "type": "structure", - "required": [ - "name" - ], - "members": { - "invert": { - "shape": "Boolean", - "documentation": "

Specify True to match anything except the match criteria. The default value is False.

" - }, - "match": { - "shape": "HeaderMatchMethod", - "documentation": "

The HeaderMatchMethod object.

" - }, - "name": { - "shape": "HeaderName", - "documentation": "

A name for the HTTP header in the client request that will be matched on.

" + "UpdateVirtualRouterOutput":{ + "type":"structure", + "required":["virtualRouter"], + "members":{ + "virtualRouter":{ + "shape":"VirtualRouterData", + "documentation":"

A full description of the virtual router that was updated.

" } }, - "documentation": "

An object that represents the HTTP header in the request.

" - }, - "FilePath": { - "type": "string", - "min": 1, - "max": 255 - }, - "AwsCloudMapInstanceAttributes": { - "type": "list", - "member": { - "shape": "AwsCloudMapInstanceAttribute" - } + "documentation":"", + "payload":"virtualRouter" }, - "VirtualNodeRef": { - "type": "structure", - "required": [ - "arn", - "createdAt", - "lastUpdatedAt", + "UpdateVirtualServiceInput":{ + "type":"structure", + "required":[ "meshName", - "meshOwner", - "resourceOwner", - "version", - "virtualNodeName" + "spec", + "virtualServiceName" ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) for the virtual node.

" - }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" - }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + "members":{ + "clientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken":true }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual node resides in.

" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual service resides in.

", + "location":"uri", + "locationName":"meshName" }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location":"querystring", + "locationName":"meshOwner" }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + "spec":{ + "shape":"VirtualServiceSpec", + "documentation":"

The new virtual service specification to apply. This overwrites the existing data.

" }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" - }, - "virtualNodeName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual node.

" + "virtualServiceName":{ + "shape":"ServiceName", + "documentation":"

The name of the virtual service to update.

", + "location":"uri", + "locationName":"virtualServiceName" } }, - "documentation": "

An object that represents a virtual node returned by a list operation.

" + "documentation":"" }, - "CreateMeshInput": { - "type": "structure", - "required": [ - "meshName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name to use for the service mesh.

" - }, - "spec": { - "shape": "MeshSpec", - "documentation": "

The service mesh specification to apply.

" - }, - "tags": { - "shape": "TagList", - "documentation": "

Optional metadata that you can apply to the service mesh to assist with categorization\n and organization. Each tag consists of a key and an optional value, both of which you\n define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

", - "tags": [ - "not-preview" - ] - } - }, - "documentation": "" - }, - "GrpcRouteAction": { - "type": "structure", - "required": [ - "weightedTargets" - ], - "members": { - "weightedTargets": { - "shape": "WeightedTargets", - "documentation": "

An object that represents the targets that traffic is routed to when a request matches the route.

" + "UpdateVirtualServiceOutput":{ + "type":"structure", + "required":["virtualService"], + "members":{ + "virtualService":{ + "shape":"VirtualServiceData", + "documentation":"

A full description of the virtual service that was updated.

" } }, - "documentation": "

An object that represents the action to take if a match is determined.

" + "documentation":"", + "payload":"virtualService" }, - "VirtualGatewayTlsValidationContextFileTrust": { - "type": "structure", - "required": [ - "certificateChain" - ], - "members": { - "certificateChain": { - "shape": "FilePath", - "documentation": "

The certificate trust chain for a certificate stored on the file system of the virtual\n node that the proxy is running on.

" + "VirtualGatewayAccessLog":{ + "type":"structure", + "members":{ + "file":{ + "shape":"VirtualGatewayFileAccessLog", + "documentation":"

The file object to send virtual gateway access logs to.

" } }, - "documentation": "

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" + "documentation":"

The access log configuration for a virtual gateway.

", + "union":true }, - "LimitExceededException": { - "type": "structure", - "members": { - "message": { - "shape": "String" + "VirtualGatewayBackendDefaults":{ + "type":"structure", + "members":{ + "clientPolicy":{ + "shape":"VirtualGatewayClientPolicy", + "documentation":"

A reference to an object that represents a client policy.

" } }, - "documentation": "

You have exceeded a service limit for your account. For more information, see Service\n Limits in the AWS App Mesh User Guide.

", - "exception": true, - "error": { - "code": "LimitExceededException", - "httpStatusCode": 400, - "senderFault": true - } + "documentation":"

An object that represents the default properties for a backend.

" }, - "UpdateMeshOutput": { - "type": "structure", - "required": [ - "mesh" - ], - "members": { - "mesh": { - "shape": "MeshData" + "VirtualGatewayCertificateAuthorityArns":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":3, + "min":1 + }, + "VirtualGatewayClientPolicy":{ + "type":"structure", + "members":{ + "tls":{ + "shape":"VirtualGatewayClientPolicyTls", + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) client policy.

" } }, - "documentation": "", - "payload": "mesh" + "documentation":"

An object that represents a client policy.

" }, - "GrpcRouteMetadataMatchMethod": { - "type": "structure", - "members": { - "exact": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must match the specified value exactly.

" + "VirtualGatewayClientPolicyTls":{ + "type":"structure", + "required":["validation"], + "members":{ + "enforce":{ + "shape":"Boolean", + "documentation":"

Whether the policy is enforced. The default is True if a value isn't specified.

", + "box":true }, - "prefix": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must begin with the specified characters.

" + "ports":{ + "shape":"PortSet", + "documentation":"

One or more ports that the policy is enforced for.

" }, - "range": { - "shape": "MatchRange", - "documentation": "

An object that represents the range of values to match on.

" + "validation":{ + "shape":"VirtualGatewayTlsValidationContext", + "documentation":"

A reference to an object that represents a TLS validation context.

" + } + }, + "documentation":"

An object that represents a Transport Layer Security (TLS) client policy.
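
A hedged sketch of building this client policy with the generated model classes; the ACM Private CA ARN is a placeholder.

```java
import software.amazon.awssdk.services.appmesh.model.*;

class GatewayClientTlsPolicySketch {
    // Enforce TLS toward backends, trusting certificates issued by an ACM
    // Private CA. The certificate authority ARN is a placeholder.
    static VirtualGatewayClientPolicy acmBackedPolicy() {
        return VirtualGatewayClientPolicy.builder()
                .tls(VirtualGatewayClientPolicyTls.builder()
                        .enforce(true) // also the default when the member is omitted
                        .validation(VirtualGatewayTlsValidationContext.builder()
                                .trust(VirtualGatewayTlsValidationContextTrust.builder()
                                        .acm(VirtualGatewayTlsValidationContextAcmTrust.builder()
                                                .certificateAuthorityArns(
                                                        "arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/example")
                                                .build())
                                        .build())
                                .build())
                        .build())
                .build();
    }
}
```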

" + }, + "VirtualGatewayConnectionPool":{ + "type":"structure", + "members":{ + "grpc":{ + "shape":"VirtualGatewayGrpcConnectionPool", + "documentation":"

An object that represents a type of connection pool.

" }, - "regex": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must include the specified characters.

" + "http":{ + "shape":"VirtualGatewayHttpConnectionPool", + "documentation":"

An object that represents a type of connection pool.

" }, - "suffix": { - "shape": "HeaderMatch", - "documentation": "

The value sent by the client must end with the specified characters.

" + "http2":{ + "shape":"VirtualGatewayHttp2ConnectionPool", + "documentation":"

An object that represents a type of connection pool.

" } }, - "documentation": "

An object that represents the match method. Specify one of the match values.

" + "documentation":"

An object that represents the type of virtual gateway connection pool.

Only one protocol is used at a time and should be the same protocol as the one chosen under port mapping.

If not present, the default value for maxPendingRequests is 2147483647.
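
A brief sketch, assuming the generated model builders, of configuring an HTTP connection pool that matches an HTTP port mapping; the limits shown are illustrative.

```java
import software.amazon.awssdk.services.appmesh.model.*;

class GatewayConnectionPoolSketch {
    // Set exactly one of grpc / http / http2 on this union shape, and match
    // the protocol of the listener's port mapping.
    static VirtualGatewayConnectionPool httpPool() {
        return VirtualGatewayConnectionPool.builder()
                .http(VirtualGatewayHttpConnectionPool.builder()
                        .maxConnections(100)        // illustrative
                        .maxPendingRequests(50)     // omit to use the 2147483647 default
                        .build())
                .build();
    }
}
```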

", + "union":true }, - "DescribeVirtualServiceInput": { - "type": "structure", - "required": [ + "VirtualGatewayData":{ + "type":"structure", + "required":[ "meshName", - "virtualServiceName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual service resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualServiceName": { - "shape": "ServiceName", - "documentation": "

The name of the virtual service to describe.

", - "location": "uri", - "locationName": "virtualServiceName" - } - }, - "documentation": "" - }, - "ListVirtualServicesLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 100 - }, - "AwsCloudMapInstanceAttribute": { - "type": "structure", - "required": [ - "key", - "value" + "metadata", + "spec", + "status", + "virtualGatewayName" ], - "members": { - "key": { - "shape": "AwsCloudMapInstanceAttributeKey", - "documentation": "

The name of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map service\n instance that contains the specified key and value is returned.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual gateway resides in.

" + }, + "metadata":{"shape":"ResourceMetadata"}, + "spec":{ + "shape":"VirtualGatewaySpec", + "documentation":"

The specifications of the virtual gateway.

" + }, + "status":{ + "shape":"VirtualGatewayStatus", + "documentation":"

The current status of the virtual gateway.

" }, - "value": { - "shape": "AwsCloudMapInstanceAttributeValue", - "documentation": "

The value of an AWS Cloud Map service instance attribute key. Any AWS Cloud Map service\n instance that contains the specified key and value is returned.

" + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual gateway.

" } }, - "documentation": "

An object that represents the AWS Cloud Map attribute information for your virtual\n node.

" + "documentation":"

An object that represents a virtual gateway returned by a describe operation.

" }, - "VirtualGatewayListenerTlsMode": { - "type": "string", - "enum": [ - "DISABLED", - "PERMISSIVE", - "STRICT" - ] - }, - "VirtualServiceSpec": { - "type": "structure", - "members": { - "provider": { - "shape": "VirtualServiceProvider", - "documentation": "

The App Mesh object that is acting as the provider for a virtual service. You can specify\n a single virtual node or virtual router.

" + "VirtualGatewayFileAccessLog":{ + "type":"structure", + "required":["path"], + "members":{ + "path":{ + "shape":"FilePath", + "documentation":"

The file path to write access logs to. You can use /dev/stdout to send access logs to standard out and configure your Envoy container to use a log driver, such as awslogs, to export the access logs to a log storage service such as Amazon CloudWatch Logs. You can also specify a path in the Envoy container's file system to write the files to disk.

" } }, - "documentation": "

An object that represents the specification of a virtual service.

" + "documentation":"

An object that represents an access log file.
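
A short sketch, assuming the generated model classes, of the /dev/stdout configuration described above.

```java
import software.amazon.awssdk.services.appmesh.model.*;

class GatewayAccessLogSketch {
    // Write Envoy access logs to stdout so a container log driver such as
    // awslogs can forward them to CloudWatch Logs.
    static VirtualGatewayLogging stdoutLogging() {
        return VirtualGatewayLogging.builder()
                .accessLog(VirtualGatewayAccessLog.builder()
                        .file(VirtualGatewayFileAccessLog.builder()
                                .path("/dev/stdout")
                                .build())
                        .build())
                .build();
    }
}
```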

" }, - "VirtualGatewayTlsValidationContextAcmTrust": { - "type": "structure", - "required": [ - "certificateAuthorityArns" - ], - "members": { - "certificateAuthorityArns": { - "shape": "VirtualGatewayCertificateAuthorityArns", - "documentation": "

One or more ACM Amazon Resource Name (ARN)s.

" + "VirtualGatewayGrpcConnectionPool":{ + "type":"structure", + "required":["maxRequests"], + "members":{ + "maxRequests":{ + "shape":"MaxRequests", + "documentation":"

Maximum number of in-flight requests that Envoy can concurrently support across hosts in the upstream cluster.

" } }, - "documentation": "

An object that represents a TLS validation context trust for an AWS Certificate Manager (ACM)\n certificate.

" + "documentation":"

An object that represents a type of connection pool.

" }, - "VirtualGatewayAccessLog": { - "type": "structure", - "members": { - "file": { - "shape": "VirtualGatewayFileAccessLog", - "documentation": "

The file object to send virtual gateway access logs to.

" + "VirtualGatewayHealthCheckIntervalMillis":{ + "type":"long", + "box":true, + "max":300000, + "min":5000 + }, + "VirtualGatewayHealthCheckPolicy":{ + "type":"structure", + "required":[ + "healthyThreshold", + "intervalMillis", + "protocol", + "timeoutMillis", + "unhealthyThreshold" + ], + "members":{ + "healthyThreshold":{ + "shape":"VirtualGatewayHealthCheckThreshold", + "documentation":"

The number of consecutive successful health checks that must occur before declaring the listener healthy.

" + }, + "intervalMillis":{ + "shape":"VirtualGatewayHealthCheckIntervalMillis", + "documentation":"

The time period in milliseconds between each health check execution.

" + }, + "path":{ + "shape":"String", + "documentation":"

The destination path for the health check request. This value is only used if the specified protocol is HTTP or HTTP/2. For any other protocol, this value is ignored.

" + }, + "port":{ + "shape":"PortNumber", + "documentation":"

The destination port for the health check request. This port must match the port defined in the PortMapping for the listener.

" + }, + "protocol":{ + "shape":"VirtualGatewayPortProtocol", + "documentation":"

The protocol for the health check request. If you specify grpc, then your service must conform to the GRPC Health Checking Protocol.

" + }, + "timeoutMillis":{ + "shape":"VirtualGatewayHealthCheckTimeoutMillis", + "documentation":"

The amount of time to wait when receiving a response from the health check, in milliseconds.

" + }, + "unhealthyThreshold":{ + "shape":"VirtualGatewayHealthCheckThreshold", + "documentation":"

The number of consecutive failed health checks that must occur before declaring a virtual gateway unhealthy.

" } }, - "documentation": "

The access log configuration for a virtual gateway.

" + "documentation":"

An object that represents the health check policy for a virtual gateway's listener.
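
A hedged sketch of a health check policy built with the generated model classes; the values are illustrative but stay within the constraints defined by this model.

```java
import software.amazon.awssdk.services.appmesh.model.*;

class GatewayHealthCheckSketch {
    // Values are illustrative but stay inside the constraints defined here:
    // interval 5000-300000 ms, timeout 2000-60000 ms, thresholds 2-10.
    static VirtualGatewayHealthCheckPolicy httpHealthCheck() {
        return VirtualGatewayHealthCheckPolicy.builder()
                .protocol(VirtualGatewayPortProtocol.HTTP)
                .path("/health")        // only used for HTTP and HTTP/2
                .port(9080)             // must match the listener's port mapping
                .healthyThreshold(3)
                .unhealthyThreshold(3)
                .intervalMillis(5000L)
                .timeoutMillis(2000L)
                .build();
    }
}
```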

" }, - "MatchRange": { - "type": "structure", - "required": [ - "end", - "start" - ], - "members": { - "end": { - "shape": "Long", - "documentation": "

The end of the range.

" - }, - "start": { - "shape": "Long", - "documentation": "

The start of the range.

" - } - }, - "documentation": "

An object that represents the range of values to match on. The first character of the range is included in the range, though the last character is not. For example, if the range specified were 1-100, only values 1-99 would be matched.

" - }, - "ListVirtualRoutersLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 100 - }, - "HealthCheckIntervalMillis": { - "type": "long", - "box": true, - "min": 5000, - "max": 300000 - }, - "VirtualRouterList": { - "type": "list", - "member": { - "shape": "VirtualRouterRef" - } + "VirtualGatewayHealthCheckThreshold":{ + "type":"integer", + "max":10, + "min":2 }, - "Arn": { - "type": "string" + "VirtualGatewayHealthCheckTimeoutMillis":{ + "type":"long", + "box":true, + "max":60000, + "min":2000 }, - "TcpRoute": { - "type": "structure", - "required": [ - "action" - ], - "members": { - "action": { - "shape": "TcpRouteAction", - "documentation": "

The action to take if a match is determined.

" - }, - "timeout": { - "shape": "TcpTimeout", - "documentation": "

An object that represents types of timeouts.

" + "VirtualGatewayHttp2ConnectionPool":{ + "type":"structure", + "required":["maxRequests"], + "members":{ + "maxRequests":{ + "shape":"MaxRequests", + "documentation":"

Maximum number of in-flight requests that Envoy can concurrently support across hosts in the upstream cluster.

" } }, - "documentation": "

An object that represents a TCP route type.

" - }, - "VirtualNodeList": { - "type": "list", - "member": { - "shape": "VirtualNodeRef" - } + "documentation":"

An object that represents a type of connection pool.

" }, - "UpdateVirtualGatewayOutput": { - "type": "structure", - "required": [ - "virtualGateway" - ], - "members": { - "virtualGateway": { - "shape": "VirtualGatewayData", - "documentation": "

A full description of the virtual gateway that was updated.

" + "VirtualGatewayHttpConnectionPool":{ + "type":"structure", + "required":["maxConnections"], + "members":{ + "maxConnections":{ + "shape":"MaxConnections", + "documentation":"

Maximum number of outbound TCP connections that Envoy can establish concurrently with all hosts in the upstream cluster.

" + }, + "maxPendingRequests":{ + "shape":"MaxPendingRequests", + "documentation":"

Number of overflowing requests that Envoy will queue to the upstream cluster after max_connections is reached.

", + "box":true } }, - "payload": "virtualGateway" + "documentation":"

An object that represents a type of connection pool.

" }, - "ListVirtualRoutersInput": { - "type": "structure", - "required": [ - "meshName" - ], - "members": { - "limit": { - "shape": "ListVirtualRoutersLimit", - "documentation": "

The maximum number of results returned by ListVirtualRouters in paginated\n output. When you use this parameter, ListVirtualRouters returns only\n limit results in a single page along with a nextToken response\n element. You can see the remaining results of the initial request by sending another\n ListVirtualRouters request with the returned nextToken value.\n This value can be between 1 and 100. If you don't use this\n parameter, ListVirtualRouters returns up to 100 results and\n a nextToken value if applicable.

", - "location": "querystring", - "locationName": "limit" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to list virtual routers in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated\n ListVirtualRouters request where limit was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken value.

", - "location": "querystring", - "locationName": "nextToken" - } - }, - "documentation": "" - }, - "DurationUnit": { - "type": "string", - "enum": [ - "ms", - "s" - ] + "VirtualGatewayList":{ + "type":"list", + "member":{"shape":"VirtualGatewayRef"} }, - "RoutePriority": { - "type": "integer", - "box": true, - "min": 0, - "max": 1000 + "VirtualGatewayListener":{ + "type":"structure", + "required":["portMapping"], + "members":{ + "connectionPool":{ + "shape":"VirtualGatewayConnectionPool", + "documentation":"

The connection pool information for the virtual gateway listener.

" + }, + "healthCheck":{ + "shape":"VirtualGatewayHealthCheckPolicy", + "documentation":"

The health check information for the listener.

" + }, + "portMapping":{ + "shape":"VirtualGatewayPortMapping", + "documentation":"

The port mapping information for the listener.

" + }, + "tls":{ + "shape":"VirtualGatewayListenerTls", + "documentation":"

A reference to an object that represents the Transport Layer Security (TLS) properties for the listener.

" + } + }, + "documentation":"

An object that represents a listener for a virtual gateway.

" }, - "ListVirtualServicesInput": { - "type": "structure", - "required": [ - "meshName" - ], - "members": { - "limit": { - "shape": "ListVirtualServicesLimit", - "documentation": "

The maximum number of results returned by ListVirtualServices in paginated\n output. When you use this parameter, ListVirtualServices returns only\n limit results in a single page along with a nextToken response\n element. You can see the remaining results of the initial request by sending another\n ListVirtualServices request with the returned nextToken value.\n This value can be between 1 and 100. If you don't use this\n parameter, ListVirtualServices returns up to 100 results and\n a nextToken value if applicable.

", - "location": "querystring", - "locationName": "limit" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to list virtual services in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated\n ListVirtualServices request where limit was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken value.

", - "location": "querystring", - "locationName": "nextToken" - } - }, - "documentation": "" - }, - "AccessLog": { - "type": "structure", - "members": { - "file": { - "shape": "FileAccessLog", - "documentation": "

The file object to send virtual node access logs to.

" - } - }, - "documentation": "

An object that represents the access logging information for a virtual node.

" - }, - "ListVirtualNodesInput": { - "type": "structure", - "required": [ - "meshName" + "VirtualGatewayListenerTls":{ + "type":"structure", + "required":[ + "certificate", + "mode" ], - "members": { - "limit": { - "shape": "ListVirtualNodesLimit", - "documentation": "

The maximum number of results returned by ListVirtualNodes in paginated\n output. When you use this parameter, ListVirtualNodes returns only\n limit results in a single page along with a nextToken response\n element. You can see the remaining results of the initial request by sending another\n ListVirtualNodes request with the returned nextToken value.\n This value can be between 1 and 100. If you don't use this\n parameter, ListVirtualNodes returns up to 100 results and a\n nextToken value if applicable.

", - "location": "querystring", - "locationName": "limit" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to list virtual nodes in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated\n ListVirtualNodes request where limit was used and the results\n exceeded the value of that parameter. Pagination continues from the end of the previous\n results that returned the nextToken value.

", - "location": "querystring", - "locationName": "nextToken" - } - }, - "documentation": "" - }, - "VirtualGatewayClientPolicy": { - "type": "structure", - "members": { - "tls": { - "shape": "VirtualGatewayClientPolicyTls", - "documentation": "

A reference to an object that represents a Transport Layer Security (TLS) client policy.

" - } - }, - "documentation": "

An object that represents a client policy.

" - }, - "ListVirtualNodesLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 100 - }, - "HealthCheckTimeoutMillis": { - "type": "long", - "box": true, - "min": 2000, - "max": 60000 - }, - "ResourceName": { - "type": "string", - "min": 1, - "max": 255 - }, - "TooManyRequestsException": { - "type": "structure", - "members": { - "message": { - "shape": "String" - } - }, - "documentation": "

The maximum request rate permitted by the App Mesh APIs has been exceeded for your\n account. For best results, use an increasing or variable sleep interval between\n requests.

", - "exception": true, - "error": { - "code": "TooManyRequestsException", - "httpStatusCode": 429, - "senderFault": true - } - }, - "Timestamp": { - "type": "timestamp" - }, - "VirtualGatewayLogging": { - "type": "structure", - "members": { - "accessLog": { - "shape": "VirtualGatewayAccessLog", - "documentation": "

The access log configuration.

" + "members":{ + "certificate":{ + "shape":"VirtualGatewayListenerTlsCertificate", + "documentation":"

An object that represents a Transport Layer Security (TLS) certificate.

" + }, + "mode":{ + "shape":"VirtualGatewayListenerTlsMode", + "documentation":"

Specify one of the following modes.

  • STRICT – Listener only accepts connections with TLS enabled.

  • PERMISSIVE – Listener accepts connections with or without TLS enabled.

  • DISABLED – Listener only accepts connections without TLS.

" } }, - "documentation": "

An object that represents logging information.

" - }, - "HeaderMatch": { - "type": "string", - "min": 1, - "max": 255 + "documentation":"

An object that represents the Transport Layer Security (TLS) properties for a listener.
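
A minimal sketch, assuming the generated model classes and enum naming, of a STRICT listener TLS configuration backed by an ACM certificate; the certificate ARN is a placeholder.

```java
import software.amazon.awssdk.services.appmesh.model.*;

class GatewayListenerTlsSketch {
    // STRICT mode: the listener only accepts TLS connections, terminated with
    // an ACM-managed certificate. The certificate ARN is a placeholder.
    static VirtualGatewayListenerTls strictAcmTls() {
        return VirtualGatewayListenerTls.builder()
                .mode(VirtualGatewayListenerTlsMode.STRICT)
                .certificate(VirtualGatewayListenerTlsCertificate.builder()
                        .acm(VirtualGatewayListenerTlsAcmCertificate.builder()
                                .certificateArn("arn:aws:acm:us-east-1:111122223333:certificate/example")
                                .build())
                        .build())
                .build();
    }
}
```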

" }, - "AccountId": { - "type": "string", - "min": 12, - "max": 12 - }, - "GatewayRouteTarget": { - "type": "structure", - "required": [ - "virtualService" - ], - "members": { - "virtualService": { - "shape": "GatewayRouteVirtualService", - "documentation": "

An object that represents a virtual service gateway route target.

" + "VirtualGatewayListenerTlsAcmCertificate":{ + "type":"structure", + "required":["certificateArn"], + "members":{ + "certificateArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" } }, - "documentation": "

An object that represents a gateway route target.

" + "documentation":"

An object that represents an AWS Certificate Manager (ACM) certificate.

" }, - "Duration": { - "type": "structure", - "members": { - "unit": { - "shape": "DurationUnit", - "documentation": "

A unit of time.

" + "VirtualGatewayListenerTlsCertificate":{ + "type":"structure", + "members":{ + "acm":{ + "shape":"VirtualGatewayListenerTlsAcmCertificate", + "documentation":"

A reference to an object that represents an AWS Certificate Manager (ACM) certificate.

" }, - "value": { - "shape": "DurationValue", - "documentation": "

A number of time units.

" + "file":{ + "shape":"VirtualGatewayListenerTlsFileCertificate", + "documentation":"

A reference to an object that represents a local file certificate.

" } }, - "documentation": "

An object that represents a duration of time.

" + "documentation":"

An object that represents a listener's Transport Layer Security (TLS) certificate.

", + "union":true }, - "DescribeRouteOutput": { - "type": "structure", - "required": [ - "route" + "VirtualGatewayListenerTlsFileCertificate":{ + "type":"structure", + "required":[ + "certificateChain", + "privateKey" ], - "members": { - "route": { - "shape": "RouteData", - "documentation": "

The full description of your route.

" + "members":{ + "certificateChain":{ + "shape":"FilePath", + "documentation":"

The certificate chain for the certificate.

" + }, + "privateKey":{ + "shape":"FilePath", + "documentation":"

The private key for a certificate stored on the file system of the mesh endpoint that the proxy is running on.

" } }, - "documentation": "", - "payload": "route" + "documentation":"

An object that represents a local file certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" }, - "HttpRouteMatch": { - "type": "structure", - "required": [ - "prefix" - ], - "members": { - "headers": { - "shape": "HttpRouteHeaders", - "documentation": "

An object that represents the client request headers to match on.

" - }, - "method": { - "shape": "HttpMethod", - "documentation": "

The client request method to match on. Specify only one.

" - }, - "prefix": { - "shape": "String", - "documentation": "

Specifies the path to match requests with. This parameter must always start with\n /, which by itself matches all requests to the virtual service name. You\n can also match for path-based routing of requests. For example, if your virtual service\n name is my-service.local and you want the route to match requests to\n my-service.local/metrics, your prefix should be\n /metrics.

" - }, - "scheme": { - "shape": "HttpScheme", - "documentation": "

The client request scheme to match on. Specify only one.

" + "VirtualGatewayListenerTlsMode":{ + "type":"string", + "enum":[ + "STRICT", + "PERMISSIVE", + "DISABLED" + ] + }, + "VirtualGatewayListeners":{ + "type":"list", + "member":{"shape":"VirtualGatewayListener"}, + "max":1, + "min":0 + }, + "VirtualGatewayLogging":{ + "type":"structure", + "members":{ + "accessLog":{ + "shape":"VirtualGatewayAccessLog", + "documentation":"

The access log configuration.

" } }, - "documentation": "

An object that represents the requirements for a route to match HTTP requests for a\n virtual router.
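
A brief sketch, assuming the generated model classes, of the /metrics prefix match described above.

```java
import software.amazon.awssdk.services.appmesh.model.*;

class HttpRouteMatchSketch {
    // Match GET requests to my-service.local/metrics by path prefix.
    static HttpRouteMatch metricsMatch() {
        return HttpRouteMatch.builder()
                .prefix("/metrics")
                .method(HttpMethod.GET)
                .build();
    }
}
```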

" + "documentation":"

An object that represents logging information.

" }, - "TagRef": { - "type": "structure", - "required": [ - "key" + "VirtualGatewayPortMapping":{ + "type":"structure", + "required":[ + "port", + "protocol" ], - "members": { - "key": { - "shape": "TagKey", - "documentation": "

One part of a key-value pair that make up a tag. A key is a general label\n that acts like a category for more specific tag values.

" + "members":{ + "port":{ + "shape":"PortNumber", + "documentation":"

The port used for the port mapping. Specify one protocol.

" }, - "value": { - "shape": "TagValue", - "documentation": "

The optional part of a key-value pair that make up a tag. A value acts as a\n descriptor within a tag category (key).

" + "protocol":{ + "shape":"VirtualGatewayPortProtocol", + "documentation":"

The protocol used for the port mapping.

" } }, - "documentation": "

Optional metadata that you apply to a resource to assist with categorization and\n organization. Each tag consists of a key and an optional value, both of which you define.\n Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

" + "documentation":"

An object that represents a port mapping.

" + }, + "VirtualGatewayPortProtocol":{ + "type":"string", + "enum":[ + "http", + "http2", + "grpc" + ] }, - "MeshRef": { - "type": "structure", - "required": [ + "VirtualGatewayRef":{ + "type":"structure", + "required":[ "arn", "createdAt", "lastUpdatedAt", "meshName", "meshOwner", "resourceOwner", - "version" + "version", + "virtualGatewayName" ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) of the service mesh.

" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) for the resource.

" }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh.

" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the resource resides in.

" }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + }, + "virtualGatewayName":{ + "shape":"ResourceName", + "documentation":"

The name of the resource.

" } }, - "documentation": "

An object that represents a service mesh returned by a list operation.

" - }, - "ListVirtualGatewaysLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 100 - }, - "MeshStatusCode": { - "type": "string", - "enum": [ - "ACTIVE", - "DELETED", - "INACTIVE" - ] + "documentation":"

An object that represents a virtual gateway returned by a list operation.

" }, - "MeshData": { - "type": "structure", - "required": [ - "meshName", - "metadata", - "spec", - "status" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh.

" + "VirtualGatewaySpec":{ + "type":"structure", + "required":["listeners"], + "members":{ + "backendDefaults":{ + "shape":"VirtualGatewayBackendDefaults", + "documentation":"

A reference to an object that represents the defaults for backends.

" }, - "metadata": { - "shape": "ResourceMetadata", - "documentation": "

The associated metadata for the service mesh.

" + "listeners":{ + "shape":"VirtualGatewayListeners", + "documentation":"

The listeners that the mesh endpoint is expected to receive inbound traffic from. You can specify one listener.

" }, - "spec": { - "shape": "MeshSpec", - "documentation": "

The associated specification for the service mesh.

" - }, - "status": { - "shape": "MeshStatus", - "documentation": "

The status of the service mesh.

" - } + "logging":{"shape":"VirtualGatewayLogging"} }, - "documentation": "

An object that represents a service mesh returned by a describe operation.

" + "documentation":"

An object that represents the specification of a service mesh resource.

" }, - "CreateGatewayRouteOutput": { - "type": "structure", - "required": [ - "gatewayRoute" - ], - "members": { - "gatewayRoute": { - "shape": "GatewayRouteData", - "documentation": "

The full description of your gateway route following the create call.

" + "VirtualGatewayStatus":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"VirtualGatewayStatusCode", + "documentation":"

The current status.

" } }, - "payload": "gatewayRoute" + "documentation":"

An object that represents the status of the mesh resource.

" }, - "GatewayRouteList": { - "type": "list", - "member": { - "shape": "GatewayRouteRef" - } + "VirtualGatewayStatusCode":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "DELETED" + ] }, - "VirtualRouterStatus": { - "type": "structure", - "required": [ - "status" - ], - "members": { - "status": { - "shape": "VirtualRouterStatusCode", - "documentation": "

The current status of the virtual router.

" + "VirtualGatewayTlsValidationContext":{ + "type":"structure", + "required":["trust"], + "members":{ + "trust":{ + "shape":"VirtualGatewayTlsValidationContextTrust", + "documentation":"

A reference to an object that represents a TLS validation context trust.

" } }, - "documentation": "

An object that represents the status of a virtual router.

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context.

" }, - "TcpRouteAction": { - "type": "structure", - "required": [ - "weightedTargets" - ], - "members": { - "weightedTargets": { - "shape": "WeightedTargets", - "documentation": "

An object that represents the targets that traffic is routed to when a request matches the route.

" + "VirtualGatewayTlsValidationContextAcmTrust":{ + "type":"structure", + "required":["certificateAuthorityArns"], + "members":{ + "certificateAuthorityArns":{ + "shape":"VirtualGatewayCertificateAuthorityArns", + "documentation":"

One or more ACM Amazon Resource Names (ARNs).

" } }, - "documentation": "

An object that represents the action to take if a match is determined.

" - }, - "DeleteVirtualGatewayInput": { - "type": "structure", - "required": [ - "meshName", - "virtualGatewayName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to delete the virtual gateway from.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway to delete.

", - "location": "uri", - "locationName": "virtualGatewayName" - } - } + "documentation":"

An object that represents a TLS validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, - "DescribeVirtualNodeInput": { - "type": "structure", - "required": [ - "meshName", - "virtualNodeName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual node resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualNodeName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual node to describe.

", - "location": "uri", - "locationName": "virtualNodeName" - } - }, - "documentation": "" - }, - "RouteStatus": { - "type": "structure", - "required": [ - "status" - ], - "members": { - "status": { - "shape": "RouteStatusCode", - "documentation": "

The current status for the route.

" + "VirtualGatewayTlsValidationContextFileTrust":{ + "type":"structure", + "required":["certificateChain"], + "members":{ + "certificateChain":{ + "shape":"FilePath", + "documentation":"

The certificate trust chain for a certificate stored on the file system of the virtual node that the proxy is running on.

" } }, - "documentation": "

An object that represents the current status of a route.

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" }, - "Listener": { - "type": "structure", - "required": [ - "portMapping" - ], - "members": { - "healthCheck": { - "shape": "HealthCheckPolicy", - "documentation": "

The health check information for the listener.

" - }, - "portMapping": { - "shape": "PortMapping", - "documentation": "

The port mapping information for the listener.

" + "VirtualGatewayTlsValidationContextTrust":{ + "type":"structure", + "members":{ + "acm":{ + "shape":"VirtualGatewayTlsValidationContextAcmTrust", + "documentation":"

A reference to an object that represents a TLS validation context trust for an AWS Certificate Manager (ACM) certificate.

" }, - "timeout": { - "shape": "ListenerTimeout", - "documentation": "

An object that represents timeouts for different protocols.

" - }, - "tls": { - "shape": "ListenerTls", - "documentation": "

A reference to an object that represents the Transport Layer Security (TLS) properties for a listener.

" + "file":{ + "shape":"VirtualGatewayTlsValidationContextFileTrust", + "documentation":"

An object that represents a TLS validation context trust for a local file.

" } }, - "documentation": "

An object that represents a listener for a virtual node.

" + "documentation":"

An object that represents a Transport Layer Security (TLS) validation context trust.

", + "union":true }, - "GrpcRoute": { - "type": "structure", - "required": [ - "action", - "match" - ], - "members": { - "action": { - "shape": "GrpcRouteAction", - "documentation": "

An object that represents the action to take if a match is determined.

" + "VirtualNodeConnectionPool":{ + "type":"structure", + "members":{ + "grpc":{ + "shape":"VirtualNodeGrpcConnectionPool", + "documentation":"

An object that represents a type of connection pool.

" }, - "match": { - "shape": "GrpcRouteMatch", - "documentation": "

An object that represents the criteria for determining a request match.

" + "http":{ + "shape":"VirtualNodeHttpConnectionPool", + "documentation":"

An object that represents a type of connection pool.

" }, - "retryPolicy": { - "shape": "GrpcRetryPolicy", - "documentation": "

An object that represents a retry policy.

" + "http2":{ + "shape":"VirtualNodeHttp2ConnectionPool", + "documentation":"

An object that represents a type of connection pool.

" }, - "timeout": { - "shape": "GrpcTimeout", - "documentation": "

An object that represents types of timeouts.

" + "tcp":{ + "shape":"VirtualNodeTcpConnectionPool", + "documentation":"

An object that represents a type of connection pool.

" } }, - "documentation": "

An object that represents a gRPC route type.

" + "documentation":"

An object that represents the type of virtual node connection pool.

Only one protocol is used at a time and should be the same protocol as the one chosen under port mapping.

If not present, the default value for maxPendingRequests is 2147483647.
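As an illustrative aside (not part of the generated model), here is a minimal sketch of populating this union with the AWS SDK for Java v2 builders generated from the shapes above; only the HTTP member is shown, and the values are placeholders.

import software.amazon.awssdk.services.appmesh.model.VirtualNodeConnectionPool;
import software.amazon.awssdk.services.appmesh.model.VirtualNodeHttpConnectionPool;

// Choose exactly one protocol member (http here), matching the protocol of the listener's port mapping.
VirtualNodeConnectionPool pool = VirtualNodeConnectionPool.builder()
        .http(VirtualNodeHttpConnectionPool.builder()
                .maxConnections(50)         // required: ceiling on concurrent outbound TCP connections
                .maxPendingRequests(100)    // optional overflow queue; defaults to 2147483647 if omitted
                .build())
        .build();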

", + "union":true }, - "ListRoutesLimit": { - "type": "integer", - "box": true, - "min": 1, - "max": 100 - }, - "ClientPolicyTls": { - "type": "structure", - "required": [ - "validation" + "VirtualNodeData":{ + "type":"structure", + "required":[ + "meshName", + "metadata", + "spec", + "status", + "virtualNodeName" ], - "members": { - "enforce": { - "shape": "Boolean", - "box": true, - "documentation": "

Whether the policy is enforced. The default is True, if a value isn't\n specified.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual node resides in.

" }, - "ports": { - "shape": "PortSet", - "documentation": "

One or more ports that the policy is enforced for.

" + "metadata":{ + "shape":"ResourceMetadata", + "documentation":"

The associated metadata for the virtual node.

" }, - "validation": { - "shape": "TlsValidationContext", - "documentation": "

A reference to an object that represents a TLS validation context.

" - } - }, - "documentation": "

An object that represents a Transport Layer Security (TLS) client policy.

" - }, - "VirtualGatewayTlsValidationContextTrust": { - "type": "structure", - "members": { - "acm": { - "shape": "VirtualGatewayTlsValidationContextAcmTrust", - "documentation": "

A reference to an object that represents a TLS validation context trust for an AWS Certicate Manager (ACM)\n certificate.

" + "spec":{ + "shape":"VirtualNodeSpec", + "documentation":"

The specifications of the virtual node.

" + }, + "status":{ + "shape":"VirtualNodeStatus", + "documentation":"

The current status for the virtual node.

" }, - "file": { - "shape": "VirtualGatewayTlsValidationContextFileTrust", - "documentation": "

An object that represents a TLS validation context trust for a local file.

" + "virtualNodeName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual node.

" } }, - "documentation": "

An object that represents a Transport Layer Security (TLS) validation context trust.

" + "documentation":"

An object that represents a virtual node returned by a describe operation.

" }, - "DeleteVirtualServiceOutput": { - "type": "structure", - "required": [ - "virtualService" - ], - "members": { - "virtualService": { - "shape": "VirtualServiceData", - "documentation": "

The virtual service that was deleted.

" + "VirtualNodeGrpcConnectionPool":{ + "type":"structure", + "required":["maxRequests"], + "members":{ + "maxRequests":{ + "shape":"MaxRequests", + "documentation":"

The maximum number of inflight requests that Envoy can concurrently support across all hosts in the upstream cluster.

" } }, - "documentation": "", - "payload": "virtualService" - }, - "VirtualGatewayPortProtocol": { - "type": "string", - "enum": [ - "grpc", - "http", - "http2" - ] + "documentation":"

An object that represents a type of connection pool.

" }, - "VirtualNodeServiceProvider": { - "type": "structure", - "required": [ - "virtualNodeName" - ], - "members": { - "virtualNodeName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual node that is acting as a service provider.

" + "VirtualNodeHttp2ConnectionPool":{ + "type":"structure", + "required":["maxRequests"], + "members":{ + "maxRequests":{ + "shape":"MaxRequests", + "documentation":"

The maximum number of inflight requests that Envoy can concurrently support across all hosts in the upstream cluster.

" } }, - "documentation": "

An object that represents a virtual node service provider.

" + "documentation":"

An object that represents a type of connection pool.

" }, - "HttpGatewayRoute": { - "type": "structure", - "required": [ - "action", - "match" - ], - "members": { - "action": { - "shape": "HttpGatewayRouteAction", - "documentation": "

An object that represents the action to take if a match is determined.

" + "VirtualNodeHttpConnectionPool":{ + "type":"structure", + "required":["maxConnections"], + "members":{ + "maxConnections":{ + "shape":"MaxConnections", + "documentation":"

The maximum number of outbound TCP connections that Envoy can establish concurrently with all hosts in the upstream cluster.

" }, - "match": { - "shape": "HttpGatewayRouteMatch", - "documentation": "

An object that represents the criteria for determining a request match.

" + "maxPendingRequests":{ + "shape":"MaxPendingRequests", + "documentation":"

The number of overflowing requests after max_connections that Envoy will queue to the upstream cluster.

", + "box":true } }, - "documentation": "

An object that represents an HTTP gateway route.

" + "documentation":"

An object that represents a type of connection pool.

" }, - "BackendDefaults": { - "type": "structure", - "members": { - "clientPolicy": { - "shape": "ClientPolicy", - "documentation": "

A reference to an object that represents a client policy.

" - } - }, - "documentation": "

An object that represents the default properties for a backend.

" + "VirtualNodeList":{ + "type":"list", + "member":{"shape":"VirtualNodeRef"} }, - "ListenerTlsFileCertificate": { - "type": "structure", - "required": [ - "certificateChain", - "privateKey" + "VirtualNodeRef":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "lastUpdatedAt", + "meshName", + "meshOwner", + "resourceOwner", + "version", + "virtualNodeName" ], - "members": { - "certificateChain": { - "shape": "FilePath", - "documentation": "

The certificate chain for the certificate.

" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) for the virtual node.

" }, - "privateKey": { - "shape": "FilePath", - "documentation": "

The private key for a certificate stored on the file system of the virtual node that the\n proxy is running on.

" - } - }, - "documentation": "

An object that represents a local file certificate.\n The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" - }, - "HttpRetryPolicy": { - "type": "structure", - "required": [ - "maxRetries", - "perRetryTimeout" - ], - "members": { - "httpRetryEvents": { - "shape": "HttpRetryPolicyEvents", - "documentation": "

Specify at least one of the following values.

  • server-error – HTTP status codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511

  • gateway-error – HTTP status codes 502, 503, and 504

  • client-error – HTTP status code 409

  • stream-error – Retry on refused stream
" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual node resides in.

" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, - "maxRetries": { - "shape": "MaxRetries", - "documentation": "

The maximum number of retry attempts.

" + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, - "perRetryTimeout": { - "shape": "Duration", - "documentation": "

An object that represents a duration of time.

" + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, - "tcpRetryEvents": { - "shape": "TcpRetryPolicyEvents", - "documentation": "

Specify a valid value.

" + "virtualNodeName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual node.

" } }, - "documentation": "

An object that represents a retry policy. Specify at least one value for at least one of the types of RetryEvents, a value for maxRetries, and a value for perRetryTimeout.
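For illustration only, a hedged sketch of building such a retry policy with the AWS SDK for Java v2 generated classes; the Duration and DurationUnit shapes are not shown in this excerpt, so their member names here are assumptions, and all values are placeholders.

import software.amazon.awssdk.services.appmesh.model.Duration;
import software.amazon.awssdk.services.appmesh.model.DurationUnit;
import software.amazon.awssdk.services.appmesh.model.HttpRetryPolicy;

// Retry up to 3 times on server-error and gateway-error events, waiting at most 2000 ms per attempt.
HttpRetryPolicy retryPolicy = HttpRetryPolicy.builder()
        .httpRetryEvents("server-error", "gateway-error")
        .maxRetries(3L)
        .perRetryTimeout(Duration.builder().unit(DurationUnit.MS).value(2000L).build())
        .build();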

" - }, - "DescribeVirtualRouterInput": { - "type": "structure", - "required": [ - "meshName", - "virtualRouterName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the virtual router resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router to describe.

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "TagResourceOutput": { - "type": "structure", - "members": { }, - "documentation": "" - }, - "RouteList": { - "type": "list", - "member": { - "shape": "RouteRef" - } + "documentation":"

An object that represents a virtual node returned by a list operation.

" }, - "TooManyTagsException": { - "type": "structure", - "members": { - "message": { - "shape": "String" + "VirtualNodeServiceProvider":{ + "type":"structure", + "required":["virtualNodeName"], + "members":{ + "virtualNodeName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual node that is acting as a service provider.

" } }, - "documentation": "

The request exceeds the maximum allowed number of tags allowed per resource. The current\n limit is 50 user tags per resource. You must reduce the number of tags in the request. None\n of the tags in this request were applied.

", - "exception": true, - "error": { - "code": "TooManyTagsException", - "httpStatusCode": 400, - "senderFault": true - } - }, - "UpdateGatewayRouteInput": { - "type": "structure", - "required": [ - "gatewayRouteName", - "meshName", - "spec", - "virtualGatewayName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "gatewayRouteName": { - "shape": "ResourceName", - "documentation": "

The name of the gateway route to update.

", - "location": "uri", - "locationName": "gatewayRouteName" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the gateway route resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "spec": { - "shape": "GatewayRouteSpec", - "documentation": "

The new gateway route specification to apply. This overwrites the existing data.

" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway that the gateway route is associated with.

", - "location": "uri", - "locationName": "virtualGatewayName" - } - } - }, - "ListVirtualGatewaysInput": { - "type": "structure", - "required": [ - "meshName" - ], - "members": { - "limit": { - "shape": "ListVirtualGatewaysLimit", - "documentation": "

The maximum number of results returned by ListVirtualGateways in paginated\n output. When you use this parameter, ListVirtualGateways returns only\n limit results in a single page along with a nextToken response\n element. You can see the remaining results of the initial request by sending another\n ListVirtualGateways request with the returned nextToken value.\n This value can be between 1 and 100. If you don't use this\n parameter, ListVirtualGateways returns up to 100 results and\n a nextToken value if applicable.
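As a hedged illustration of the nextToken contract described above, a manual pagination loop with the AWS SDK for Java v2 App Mesh client; the mesh name is a placeholder, and the virtualGateways() accessor is assumed to mirror the ListVirtualNodesOutput pattern in this model.

import software.amazon.awssdk.services.appmesh.AppMeshClient;
import software.amazon.awssdk.services.appmesh.model.ListVirtualGatewaysRequest;
import software.amazon.awssdk.services.appmesh.model.ListVirtualGatewaysResponse;

AppMeshClient appMesh = AppMeshClient.create();
String nextToken = null;
do {
    // Request one page of up to 50 results, passing the token from the previous page (null on the first call).
    ListVirtualGatewaysResponse page = appMesh.listVirtualGateways(
            ListVirtualGatewaysRequest.builder()
                    .meshName("my-mesh")
                    .limit(50)
                    .nextToken(nextToken)
                    .build());
    page.virtualGateways().forEach(gw -> System.out.println(gw.virtualGatewayName()));
    nextToken = page.nextToken();   // null when there are no more results
} while (nextToken != null);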

", - "location": "querystring", - "locationName": "limit" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to list virtual gateways in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value returned from a previous paginated\n ListVirtualGateways request where limit was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken value.

", - "location": "querystring", - "locationName": "nextToken" - } - } + "documentation":"

An object that represents a virtual node service provider.

" }, - "PortNumber": { - "type": "integer", - "min": 1, - "max": 65535 - }, - "TlsValidationContextFileTrust": { - "type": "structure", - "required": [ - "certificateChain" - ], - "members": { - "certificateChain": { - "shape": "FilePath", - "documentation": "

The certificate trust chain for a certificate stored on the file system of the virtual\n node that the proxy is running on.

" + "VirtualNodeSpec":{ + "type":"structure", + "members":{ + "backendDefaults":{ + "shape":"BackendDefaults", + "documentation":"

A reference to an object that represents the defaults for backends.

" + }, + "backends":{ + "shape":"Backends", + "documentation":"

The backends that the virtual node is expected to send outbound traffic to.

" + }, + "listeners":{ + "shape":"Listeners", + "documentation":"

The listener that the virtual node is expected to receive inbound traffic from. You can specify one listener.

" + }, + "logging":{ + "shape":"Logging", + "documentation":"

The inbound and outbound access logging information for the virtual node.

" + }, + "serviceDiscovery":{ + "shape":"ServiceDiscovery", + "documentation":"

The service discovery information for the virtual node. If your virtual node does not expect ingress traffic, you can omit this parameter. If you specify a listener, then you must specify service discovery information.

" } }, - "documentation": "

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" + "documentation":"

An object that represents the specification of a virtual node.

" }, - "GrpcRouteMetadata": { - "type": "structure", - "required": [ - "name" - ], - "members": { - "invert": { - "shape": "Boolean", - "documentation": "

Specify True to match anything except the match criteria. The default value is False.

" - }, - "match": { - "shape": "GrpcRouteMetadataMatchMethod", - "documentation": "

An object that represents the data to match from the request.

" - }, - "name": { - "shape": "HeaderName", - "documentation": "

The name of the route.

" + "VirtualNodeStatus":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"VirtualNodeStatusCode", + "documentation":"

The current status of the virtual node.

" } }, - "documentation": "

An object that represents the match metadata for the route.

" + "documentation":"

An object that represents the current status of the virtual node.

" }, - "CreateRouteInput": { - "type": "structure", - "required": [ - "meshName", - "routeName", - "spec", - "virtualRouterName" - ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to create the route in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "routeName": { - "shape": "ResourceName", - "documentation": "

The name to use for the route.

" - }, - "spec": { - "shape": "RouteSpec", - "documentation": "

The route specification to apply.

" - }, - "tags": { - "shape": "TagList", - "documentation": "

Optional metadata that you can apply to the route to assist with categorization and\n organization. Each tag consists of a key and an optional value, both of which you define.\n Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

", - "tags": [ - "not-preview" - ] - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router in which to create the route. If the virtual router is in\n a shared mesh, then you must be the owner of the virtual router resource.

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "VirtualGatewayCertificateAuthorityArns": { - "type": "list", - "member": { - "shape": "Arn" - }, - "min": 1, - "max": 3 - }, - "WeightedTargets": { - "type": "list", - "member": { - "shape": "WeightedTarget" - }, - "min": 1, - "max": 10 - }, - "HttpRouteHeaders": { - "type": "list", - "member": { - "shape": "HttpRouteHeader" - }, - "min": 1, - "max": 10 - }, - "String": { - "type": "string" - }, - "TcpTimeout": { - "type": "structure", - "members": { - "idle": { - "shape": "Duration" - } - }, - "documentation": "

An object that represents types of timeouts.

" - }, - "HttpScheme": { - "type": "string", - "enum": [ - "http", - "https" + "VirtualNodeStatusCode":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "DELETED" ] }, - "DeleteGatewayRouteInput": { - "type": "structure", - "required": [ - "gatewayRouteName", - "meshName", - "virtualGatewayName" - ], - "members": { - "gatewayRouteName": { - "shape": "ResourceName", - "documentation": "

The name of the gateway route to delete.

", - "location": "uri", - "locationName": "gatewayRouteName" - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to delete the gateway route from.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual gateway to delete the route from.

", - "location": "uri", - "locationName": "virtualGatewayName" + "VirtualNodeTcpConnectionPool":{ + "type":"structure", + "required":["maxConnections"], + "members":{ + "maxConnections":{ + "shape":"MaxConnections", + "documentation":"

The maximum number of outbound TCP connections that Envoy can establish concurrently with all hosts in the upstream cluster.

" } - } + }, + "documentation":"

An object that represents a type of connection pool.

" }, - "UpdateRouteInput": { - "type": "structure", - "required": [ + "VirtualRouterData":{ + "type":"structure", + "required":[ "meshName", - "routeName", + "metadata", "spec", + "status", "virtualRouterName" ], - "members": { - "clientToken": { - "shape": "String", - "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", - "idempotencyToken": true - }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the route resides in.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - }, - "routeName": { - "shape": "ResourceName", - "documentation": "

The name of the route to update.

", - "location": "uri", - "locationName": "routeName" - }, - "spec": { - "shape": "RouteSpec", - "documentation": "

The new route specification to apply. This overwrites the existing data.

" - }, - "virtualRouterName": { - "shape": "ResourceName", - "documentation": "

The name of the virtual router that the route is associated with.

", - "location": "uri", - "locationName": "virtualRouterName" - } - }, - "documentation": "" - }, - "HttpRoute": { - "type": "structure", - "required": [ - "action", - "match" - ], - "members": { - "action": { - "shape": "HttpRouteAction", - "documentation": "

An object that represents the action to take if a match is determined.

" + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual router resides in.

" + }, + "metadata":{ + "shape":"ResourceMetadata", + "documentation":"

The associated metadata for the virtual router.

" }, - "match": { - "shape": "HttpRouteMatch", - "documentation": "

An object that represents the criteria for determining a request match.

" + "spec":{ + "shape":"VirtualRouterSpec", + "documentation":"

The specifications of the virtual router.

" }, - "retryPolicy": { - "shape": "HttpRetryPolicy", - "documentation": "

An object that represents a retry policy.

" + "status":{ + "shape":"VirtualRouterStatus", + "documentation":"

The current status of the virtual router.

" }, - "timeout": { - "shape": "HttpTimeout", - "documentation": "

An object that represents types of timeouts.

" + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router.

" } }, - "documentation": "

An object that represents an HTTP or HTTP/2 route type.

" + "documentation":"

An object that represents a virtual router returned by a describe operation.

" }, - "DescribeMeshInput": { - "type": "structure", - "required": [ - "meshName" - ], - "members": { - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh to describe.

", - "location": "uri", - "locationName": "meshName" - }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", - "location": "querystring", - "locationName": "meshOwner" - } + "VirtualRouterList":{ + "type":"list", + "member":{"shape":"VirtualRouterRef"} + }, + "VirtualRouterListener":{ + "type":"structure", + "required":["portMapping"], + "members":{ + "portMapping":{"shape":"PortMapping"} }, - "documentation": "" + "documentation":"

An object that represents a virtual router listener.

" }, - "VirtualGatewayRef": { - "type": "structure", - "required": [ + "VirtualRouterListeners":{ + "type":"list", + "member":{"shape":"VirtualRouterListener"}, + "max":1, + "min":1 + }, + "VirtualRouterRef":{ + "type":"structure", + "required":[ "arn", "createdAt", "lastUpdatedAt", @@ -5793,173 +4962,249 @@ "meshOwner", "resourceOwner", "version", - "virtualGatewayName" + "virtualRouterName" ], - "members": { - "arn": { - "shape": "Arn", - "documentation": "

The full Amazon Resource Name (ARN) for the resource.

" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) for the virtual router.

" }, - "createdAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" }, - "lastUpdatedAt": { - "shape": "Timestamp", - "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, - "meshName": { - "shape": "ResourceName", - "documentation": "

The name of the service mesh that the resource resides in.

" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual router resides in.

" }, - "meshOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, - "resourceOwner": { - "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, - "version": { - "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, - "virtualGatewayName": { - "shape": "ResourceName", - "documentation": "

The name of the resource.

" + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router.

" } }, - "documentation": "

An object that represents a virtual gateway returned by a list operation.

" + "documentation":"

An object that represents a virtual router returned by a list operation.

" }, - "MeshSpec": { - "type": "structure", - "members": { - "egressFilter": { - "shape": "EgressFilter", - "documentation": "

The egress filter rules for the service mesh.

" + "VirtualRouterServiceProvider":{ + "type":"structure", + "required":["virtualRouterName"], + "members":{ + "virtualRouterName":{ + "shape":"ResourceName", + "documentation":"

The name of the virtual router that is acting as a service provider.

" } }, - "documentation": "

An object that represents the specification of a service mesh.

" + "documentation":"

An object that represents a virtual router service provider.

" }, - "DescribeVirtualGatewayOutput": { - "type": "structure", - "required": [ - "virtualGateway" - ], - "members": { - "virtualGateway": { - "shape": "VirtualGatewayData", - "documentation": "

The full description of your virtual gateway.

" + "VirtualRouterSpec":{ + "type":"structure", + "members":{ + "listeners":{ + "shape":"VirtualRouterListeners", + "documentation":"

The listeners that the virtual router is expected to receive inbound traffic from. You can specify one listener.

" } }, - "payload": "virtualGateway" + "documentation":"

An object that represents the specification of a virtual router.

" }, - "DescribeGatewayRouteOutput": { - "type": "structure", - "required": [ - "gatewayRoute" - ], - "members": { - "gatewayRoute": { - "shape": "GatewayRouteData", - "documentation": "

The full description of your gateway route.

" + "VirtualRouterStatus":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"VirtualRouterStatusCode", + "documentation":"

The current status of the virtual router.

" } }, - "payload": "gatewayRoute" + "documentation":"

An object that represents the status of a virtual router.

" }, - "ListTagsForResourceOutput": { - "type": "structure", - "required": [ - "tags" - ], - "members": { - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListTagsForResource\n request. When the results of a ListTagsForResource request exceed\n limit, you can use this value to retrieve the next page of results. This\n value is null when there are no more results to return.

" + "VirtualRouterStatusCode":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "DELETED" + ] + }, + "VirtualServiceBackend":{ + "type":"structure", + "required":["virtualServiceName"], + "members":{ + "clientPolicy":{ + "shape":"ClientPolicy", + "documentation":"

A reference to an object that represents the client policy for a backend.

" }, - "tags": { - "shape": "TagList", - "documentation": "

The tags for the resource.

" + "virtualServiceName":{ + "shape":"ServiceName", + "documentation":"

The name of the virtual service that is acting as a virtual node backend.

" } }, - "documentation": "" + "documentation":"

An object that represents a virtual service backend for a virtual node.

" }, - "ServiceDiscovery": { - "type": "structure", - "members": { - "awsCloudMap": { - "shape": "AwsCloudMapServiceDiscovery", - "documentation": "

Specifies any AWS Cloud Map information for the virtual node.

" + "VirtualServiceData":{ + "type":"structure", + "required":[ + "meshName", + "metadata", + "spec", + "status", + "virtualServiceName" + ], + "members":{ + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual service resides in.

" }, - "dns": { - "shape": "DnsServiceDiscovery", - "documentation": "

Specifies the DNS information for the virtual node.

" + "metadata":{"shape":"ResourceMetadata"}, + "spec":{ + "shape":"VirtualServiceSpec", + "documentation":"

The specifications of the virtual service.

" + }, + "status":{ + "shape":"VirtualServiceStatus", + "documentation":"

The current status of the virtual service.

" + }, + "virtualServiceName":{ + "shape":"ServiceName", + "documentation":"

The name of the virtual service.

" } }, - "documentation": "

An object that represents the service discovery information for a virtual node.

" + "documentation":"

An object that represents a virtual service returned by a describe operation.

" }, - "ListVirtualNodesOutput": { - "type": "structure", - "required": [ - "virtualNodes" - ], - "members": { - "nextToken": { - "shape": "String", - "documentation": "

The nextToken value to include in a future ListVirtualNodes\n request. When the results of a ListVirtualNodes request exceed\n limit, you can use this value to retrieve the next page of results. This\n value is null when there are no more results to return.

" + "VirtualServiceList":{ + "type":"list", + "member":{"shape":"VirtualServiceRef"} + }, + "VirtualServiceProvider":{ + "type":"structure", + "members":{ + "virtualNode":{ + "shape":"VirtualNodeServiceProvider", + "documentation":"

The virtual node associated with a virtual service.

" }, - "virtualNodes": { - "shape": "VirtualNodeList", - "documentation": "

The list of existing virtual nodes for the specified service mesh.

" + "virtualRouter":{ + "shape":"VirtualRouterServiceProvider", + "documentation":"

The virtual router associated with a virtual service.

" } }, - "documentation": "" + "documentation":"

An object that represents the provider for a virtual service.

", + "union":true }, - "UntagResourceInput": { - "type": "structure", - "required": [ - "resourceArn", - "tagKeys" + "VirtualServiceRef":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "lastUpdatedAt", + "meshName", + "meshOwner", + "resourceOwner", + "version", + "virtualServiceName" ], - "members": { - "resourceArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) of the resource to delete tags from.

", - "location": "querystring", - "locationName": "resourceArn" + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The full Amazon Resource Name (ARN) for the virtual service.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The Unix epoch timestamp in seconds for when the resource was last updated.

" }, - "tagKeys": { - "shape": "TagKeyList", - "documentation": "

The keys of the tags to be removed.

" + "meshName":{ + "shape":"ResourceName", + "documentation":"

The name of the service mesh that the virtual service resides in.

" + }, + "meshOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + }, + "resourceOwner":{ + "shape":"AccountId", + "documentation":"

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version":{ + "shape":"Long", + "documentation":"

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + }, + "virtualServiceName":{ + "shape":"ServiceName", + "documentation":"

The name of the virtual service.

" } }, - "documentation": "" + "documentation":"

An object that represents a virtual service returned by a list operation.

" }, - "ListenerTlsAcmCertificate": { - "type": "structure", - "required": [ - "certificateArn" - ], - "members": { - "certificateArn": { - "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) for the certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" + "VirtualServiceSpec":{ + "type":"structure", + "members":{ + "provider":{ + "shape":"VirtualServiceProvider", + "documentation":"

The App Mesh object that is acting as the provider for a virtual service. You can specify a single virtual node or virtual router.
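A minimal sketch, for illustration, of selecting a virtual node provider with the AWS SDK for Java v2 builders generated from these shapes; the virtual node name is a placeholder.

import software.amazon.awssdk.services.appmesh.model.VirtualNodeServiceProvider;
import software.amazon.awssdk.services.appmesh.model.VirtualServiceProvider;
import software.amazon.awssdk.services.appmesh.model.VirtualServiceSpec;

// A virtual service backed by a single virtual node; use virtualRouter(...) instead to route through a virtual router.
VirtualServiceSpec spec = VirtualServiceSpec.builder()
        .provider(VirtualServiceProvider.builder()
                .virtualNode(VirtualNodeServiceProvider.builder()
                        .virtualNodeName("my-node")
                        .build())
                .build())
        .build();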

" } }, - "documentation": "

An object that represents an AWS Certicate Manager (ACM) certificate.

" + "documentation":"

An object that represents the specification of a virtual service.

" }, - "TagKey": { - "type": "string", - "min": 1, - "max": 128 + "VirtualServiceStatus":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"VirtualServiceStatusCode", + "documentation":"

The current status of the virtual service.

" + } + }, + "documentation":"

An object that represents the status of a virtual service.

" }, - "VirtualServiceStatusCode": { - "type": "string", - "enum": [ + "VirtualServiceStatusCode":{ + "type":"string", + "enum":[ "ACTIVE", - "DELETED", - "INACTIVE" + "INACTIVE", + "DELETED" ] + }, + "WeightedTarget":{ + "type":"structure", + "required":[ + "virtualNode", + "weight" + ], + "members":{ + "virtualNode":{ + "shape":"ResourceName", + "documentation":"

The virtual node to associate with the weighted target.

" + }, + "weight":{ + "shape":"PercentInt", + "documentation":"

The relative weight of the weighted target.

" + } + }, + "documentation":"

An object that represents a target and its relative weight. Traffic is distributed across targets according to their relative weight. For example, a weighted target with a relative weight of 50 receives five times as much traffic as one with a relative weight of 10. The total weight for all targets combined must be less than or equal to 100.
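To make the weighting arithmetic concrete, a small sketch using the generated WeightedTarget builder from the AWS SDK for Java v2; the virtual node names are placeholders.

import java.util.Arrays;
import java.util.List;
import software.amazon.awssdk.services.appmesh.model.WeightedTarget;

// A 90/10 canary split: the first target receives nine times as much traffic as the second,
// and the combined weight (100) stays within the allowed total.
List<WeightedTarget> targets = Arrays.asList(
        WeightedTarget.builder().virtualNode("service-v1").weight(90).build(),
        WeightedTarget.builder().virtualNode("service-v2").weight(10).build());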

" + }, + "WeightedTargets":{ + "type":"list", + "member":{"shape":"WeightedTarget"}, + "max":10, + "min":1 } - } + }, + "documentation":"

AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and control microservices. App Mesh standardizes how your microservices communicate, giving you end-to-end visibility and helping to ensure high availability for your applications.

App Mesh gives you consistent visibility and network traffic controls for every microservice in an application. You can use App Mesh with AWS Fargate, Amazon ECS, Amazon EKS, Kubernetes on AWS, and Amazon EC2.

App Mesh supports microservice applications that use service discovery naming for their components. For more information about service discovery on Amazon ECS, see Service Discovery in the Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns and coredns are supported. For more information, see DNS for Services and Pods in the Kubernetes documentation.

" } diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 1f1ac2651002..2c9020d9b1e6 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index 5e766e990745..06b0fc64396c 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT appsync diff --git a/services/appsync/src/main/resources/codegen-resources/service-2.json b/services/appsync/src/main/resources/codegen-resources/service-2.json index 30aa7340af4c..d0fb12696941 100644 --- a/services/appsync/src/main/resources/codegen-resources/service-2.json +++ b/services/appsync/src/main/resources/codegen-resources/service-2.json @@ -742,7 +742,7 @@ }, "apiCachingBehavior":{ "shape":"ApiCachingBehavior", - "documentation":"

Caching behavior.

  • FULL_REQUEST_CACHING: All requests are fully cached.

  • PER_RESOLVER_CACHING: Individual resovlers that you specify are cached.

" + "documentation":"

Caching behavior.

  • FULL_REQUEST_CACHING: All requests are fully cached.

  • PER_RESOLVER_CACHING: Individual resolvers that you specify are cached.

" }, "transitEncryptionEnabled":{ "shape":"Boolean", @@ -754,7 +754,7 @@ }, "type":{ "shape":"ApiCacheType", - "documentation":"

The cache instance type. Valid values are

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are avaible, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" + "documentation":"

The cache instance type. Valid values are

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" }, "status":{ "shape":"ApiCacheStatus", @@ -1001,11 +1001,11 @@ }, "apiCachingBehavior":{ "shape":"ApiCachingBehavior", - "documentation":"

Caching behavior.

  • FULL_REQUEST_CACHING: All requests are fully cached.

  • PER_RESOLVER_CACHING: Individual resovlers that you specify are cached.

" + "documentation":"

Caching behavior.

  • FULL_REQUEST_CACHING: All requests are fully cached.

  • PER_RESOLVER_CACHING: Individual resolvers that you specify are cached.

" }, "type":{ "shape":"ApiCacheType", - "documentation":"

The cache instance type. Valid values are

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are avaible, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" + "documentation":"

The cache instance type. Valid values are

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" } }, "documentation":"

Represents the input of a CreateApiCache operation.
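For illustration, a hedged sketch of a CreateApiCache call with the AWS SDK for Java v2; the apiId is a placeholder, and the ttl and atRestEncryptionEnabled members are assumptions since they are not shown in this excerpt.

import software.amazon.awssdk.services.appsync.AppSyncClient;
import software.amazon.awssdk.services.appsync.model.ApiCacheType;
import software.amazon.awssdk.services.appsync.model.ApiCachingBehavior;
import software.amazon.awssdk.services.appsync.model.CreateApiCacheRequest;

AppSyncClient appSync = AppSyncClient.create();
appSync.createApiCache(CreateApiCacheRequest.builder()
        .apiId("example-api-id")                                   // placeholder
        .apiCachingBehavior(ApiCachingBehavior.PER_RESOLVER_CACHING)
        .type(ApiCacheType.LARGE)                                  // a generic identifier, not a legacy EC2-style value
        .ttl(3600L)                                                // assumed member: cache TTL in seconds
        .transitEncryptionEnabled(false)
        .atRestEncryptionEnabled(false)                            // assumed member
        .build());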

" @@ -1985,7 +1985,7 @@ }, "wafWebAclArn":{ "shape":"String", - "documentation":"

The ARN of the AWS WAF ACL associated with this GraphqlApi if one exists.

" + "documentation":"

The ARN of the AWS WAF web access control list (web ACL) associated with this GraphqlApi, if one exists.

" } }, "documentation":"

Describes a GraphQL API.

" @@ -2755,11 +2755,11 @@ }, "apiCachingBehavior":{ "shape":"ApiCachingBehavior", - "documentation":"

Caching behavior.

  • FULL_REQUEST_CACHING: All requests are fully cached.

  • PER_RESOLVER_CACHING: Individual resovlers that you specify are cached.

" + "documentation":"

Caching behavior.

  • FULL_REQUEST_CACHING: All requests are fully cached.

  • PER_RESOLVER_CACHING: Individual resolvers that you specify are cached.

" }, "type":{ "shape":"ApiCacheType", - "documentation":"

The cache instance type. Valid values are

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are avaible, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" + "documentation":"

The cache instance type. Valid values are

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" } }, "documentation":"

Represents the input of a UpdateApiCache operation.

" diff --git a/services/athena/pom.xml b/services/athena/pom.xml index bbb9332b7169..ef57bb5b2366 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index cbad073fd44c..54eca885a89b 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index ceffe8c946cb..f75da0c12148 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -22,7 +22,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more EC2 instances to the specified Auto Scaling group.

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.

For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Attaches one or more EC2 instances to the specified Auto Scaling group.

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.

For more information, see Attach EC2 instances to your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
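As a hedged usage sketch of this operation with the AWS SDK for Java v2 Auto Scaling client; the group name and instance IDs are placeholders.

import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.AttachInstancesRequest;

AutoScalingClient autoScaling = AutoScalingClient.create();
// Attaching two instances raises the group's desired capacity by two, as described above.
autoScaling.attachInstances(AttachInstancesRequest.builder()
        .autoScalingGroupName("my-asg")
        .instanceIds("i-1234567890abcdef0", "i-0abcdef1234567890")
        .build());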

" }, "AttachLoadBalancerTargetGroups":{ "name":"AttachLoadBalancerTargetGroups", @@ -39,7 +39,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.

With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

This operation is used with the following load balancer types:

  • Application Load Balancer - Operates at the application layer (layer 7) and supports HTTP and HTTPS.

  • Network Load Balancer - Operates at the transport layer (layer 4) and supports TCP, TLS, and UDP.

  • Gateway Load Balancer - Operates at the network layer (layer 3).

To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.

For more information, see Elastic Load Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "AttachLoadBalancers":{ "name":"AttachLoadBalancers", @@ -56,7 +56,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

To attach an Application Load Balancer or a Network Load Balancer, use the AttachLoadBalancerTargetGroups API operation instead.

Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling registers the running instances with these Classic Load Balancers.

To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach the load balancer from the Auto Scaling group, call the DetachLoadBalancers API.

For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

To attach an Application Load Balancer, Network Load Balancer, or Gateway Load Balancer, use the AttachLoadBalancerTargetGroups API operation instead.

Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling registers the running instances with these Classic Load Balancers.

To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach the load balancer from the Auto Scaling group, call the DetachLoadBalancers API.

For more information, see Elastic Load Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "BatchDeleteScheduledAction":{ "name":"BatchDeleteScheduledAction", @@ -124,7 +124,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Completes the lifecycle action for the specified token or instance with the specified result.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Completes the lifecycle action for the specified token or instance with the specified result.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
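A minimal sketch of step 5 with the AWS SDK for Java v2; the group name, hook name, and token are placeholders.

import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.CompleteLifecycleActionRequest;

AutoScalingClient autoScaling = AutoScalingClient.create();
// Tell Amazon EC2 Auto Scaling to continue once the custom launch or terminate work is done.
autoScaling.completeLifecycleAction(CompleteLifecycleActionRequest.builder()
        .autoScalingGroupName("my-asg")
        .lifecycleHookName("my-launch-hook")
        .lifecycleActionToken("bcd2f1b8-9a78-44d3-8a7a-4dd07d7cf635")
        .lifecycleActionResult("CONTINUE")                 // or "ABANDON"
        .build());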

" }, "CreateAutoScalingGroup":{ "name":"CreateAutoScalingGroup", @@ -139,7 +139,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide.

For introductory exercises for creating an Auto Scaling group, see Getting Started with Amazon EC2 Auto Scaling and Tutorial: Set Up a Scaled and Load-Balanced Application in the Amazon EC2 Auto Scaling User Guide. For more information, see Auto Scaling Groups in the Amazon EC2 Auto Scaling User Guide.

Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances.

" + "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.

For introductory exercises for creating an Auto Scaling group, see Getting started with Amazon EC2 Auto Scaling and Tutorial: Set up a scaled and load-balanced application in the Amazon EC2 Auto Scaling User Guide. For more information, see Auto Scaling groups in the Amazon EC2 Auto Scaling User Guide.

Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances.
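For illustration, a hedged sketch of a minimal CreateAutoScalingGroup call with the AWS SDK for Java v2 that expresses the three size parameters as instance counts; all names are placeholders, and the launch configuration plus Availability Zone shown here are just one valid combination of the required launch and network settings.

import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.CreateAutoScalingGroupRequest;

AutoScalingClient autoScaling = AutoScalingClient.create();
autoScaling.createAutoScalingGroup(CreateAutoScalingGroupRequest.builder()
        .autoScalingGroupName("my-asg")                    // placeholder
        .launchConfigurationName("my-launch-config")       // placeholder
        .minSize(1)                                        // sizes given as instance counts
        .maxSize(4)
        .desiredCapacity(2)
        .availabilityZones("us-east-1a")                   // assumed: an AZ or VPCZoneIdentifier is also needed
        .build());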

" }, "CreateLaunchConfiguration":{ "name":"CreateLaunchConfiguration", @@ -153,7 +153,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch configurations in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateOrUpdateTags":{ "name":"CreateOrUpdateTags", @@ -168,7 +168,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ResourceInUseFault"} ], - "documentation":"

Creates or updates tags for the specified Auto Scaling group.

When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates or updates tags for the specified Auto Scaling group.

When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.

For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.
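Illustrative only (not part of the service model): a hedged sketch of CreateOrUpdateTags with the AWS SDK for Java v2; the group name, tag key, and value are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.CreateOrUpdateTagsRequest;
import software.amazon.awssdk.services.autoscaling.model.Tag;

public class TagGroupExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Tag an existing group; reusing an existing key overwrites the previous tag definition.
            autoScaling.createOrUpdateTags(CreateOrUpdateTagsRequest.builder()
                    .tags(Tag.builder()
                            .resourceId("my-asg")                 // hypothetical group name
                            .resourceType("auto-scaling-group")
                            .key("environment")
                            .value("test")
                            .propagateAtLaunch(true)              // copy the tag to instances at launch
                            .build())
                    .build());
        }
    }
}
```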

" }, "DeleteAutoScalingGroup":{ "name":"DeleteAutoScalingGroup", @@ -236,7 +236,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Deletes the specified scaling policy.

Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

For more information, see Deleting a Scaling Policy in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Deletes the specified scaling policy.

Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.

For more information, see Deleting a scaling policy in the Amazon EC2 Auto Scaling User Guide.

" }, "DeleteScheduledAction":{ "name":"DeleteScheduledAction", @@ -276,7 +276,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the current Amazon EC2 Auto Scaling resource quotas for your AWS account.

For information about requesting an increase, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the current Amazon EC2 Auto Scaling resource quotas for your AWS account.

For information about requesting an increase, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.
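Illustrative only (not part of the service model): a minimal DescribeAccountLimits call with the AWS SDK for Java v2 to check current usage against the quotas described above.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.DescribeAccountLimitsResponse;

public class AccountLimitsExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Query the account-level quotas and current usage for this Region.
            DescribeAccountLimitsResponse limits = autoScaling.describeAccountLimits();
            System.out.printf("Auto Scaling groups: %d of %d used%n",
                    limits.numberOfAutoScalingGroups(), limits.maxNumberOfAutoScalingGroups());
            System.out.printf("Launch configurations: %d of %d used%n",
                    limits.numberOfLaunchConfigurations(), limits.maxNumberOfLaunchConfigurations());
        }
    }
}
```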

" }, "DescribeAdjustmentTypes":{ "name":"DescribeAdjustmentTypes", @@ -437,7 +437,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the load balancers for the specified Auto Scaling group.

This operation describes only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use the DescribeLoadBalancerTargetGroups API instead.

" + "documentation":"

Describes the load balancers for the specified Auto Scaling group.

This operation describes only Classic Load Balancers. If you have Application Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the DescribeLoadBalancerTargetGroups API instead.

" }, "DescribeMetricCollectionTypes":{ "name":"DescribeMetricCollectionTypes", @@ -553,7 +553,7 @@ {"shape":"InvalidNextToken"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the specified tags.

You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the specified tags.

You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.
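Illustrative only (not part of the service model): a sketch of DescribeTags with a single filter using the AWS SDK for Java v2; the group name is a placeholder.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.DescribeTagsRequest;
import software.amazon.awssdk.services.autoscaling.model.Filter;

public class DescribeTagsExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // List the tags for one group; a tag is returned only if it matches all filters.
            autoScaling.describeTags(DescribeTagsRequest.builder()
                    .filters(Filter.builder()
                            .name("auto-scaling-group")   // filter by group name
                            .values("my-asg")             // hypothetical group name
                            .build())
                    .build())
                    .tags()
                    .forEach(t -> System.out.println(t.key() + "=" + t.value()));
        }
    }
}
```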

" }, "DescribeTerminationPolicyTypes":{ "name":"DescribeTerminationPolicyTypes", @@ -568,7 +568,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the termination policies supported by Amazon EC2 Auto Scaling.

For more information, see Controlling Which Auto Scaling Instances Terminate During Scale In in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the termination policies supported by Amazon EC2 Auto Scaling.

For more information, see Controlling which Auto Scaling instances terminate during scale in in the Amazon EC2 Auto Scaling User Guide.

" }, "DetachInstances":{ "name":"DetachInstances", @@ -584,7 +584,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independent of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independently of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach EC2 instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" }, "DetachLoadBalancerTargetGroups":{ "name":"DetachLoadBalancerTargetGroups", @@ -616,7 +616,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use the DetachLoadBalancerTargetGroups API instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using the DescribeLoadBalancers API call. The instances remain running.

" + "documentation":"

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers, Network Load Balancers, or Gateway Load Balancers, use the DetachLoadBalancerTargetGroups API instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, you can no longer describe the load balancer using the DescribeLoadBalancers API call. The instances remain running.

" }, "DisableMetricsCollection":{ "name":"DisableMetricsCollection", @@ -640,7 +640,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring Your Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring CloudWatch metrics for your Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.

" }, "EnterStandby":{ "name":"EnterStandby", @@ -656,7 +656,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Moves the specified instances into the standby state.

If you choose to decrement the desired capacity of the Auto Scaling group, the instances can enter standby as long as the desired capacity of the Auto Scaling group after the instances are placed into standby is equal to or greater than the minimum capacity of the group.

If you choose not to decrement the desired capacity of the Auto Scaling group, the Auto Scaling group launches new instances to replace the instances on standby.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Moves the specified instances into the standby state.

If you choose to decrement the desired capacity of the Auto Scaling group, the instances can enter standby as long as the desired capacity of the Auto Scaling group after the instances are placed into standby is equal to or greater than the minimum capacity of the group.

If you choose not to decrement the desired capacity of the Auto Scaling group, the Auto Scaling group launches new instances to replace the instances on standby.

For more information, see Temporarily removing instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
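Illustrative only (not part of the service model): a sketch of EnterStandby with the AWS SDK for Java v2; the group name and instance ID are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.EnterStandbyRequest;

public class EnterStandbyExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Move one instance to Standby and decrement the desired capacity so
            // no replacement instance is launched while it is out of service.
            autoScaling.enterStandby(EnterStandbyRequest.builder()
                    .autoScalingGroupName("my-asg")         // hypothetical group name
                    .instanceIds("i-0123456789abcdef0")     // hypothetical instance ID
                    .shouldDecrementDesiredCapacity(true)
                    .build());
        }
    }
}
```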

" }, "ExecutePolicy":{ "name":"ExecutePolicy", @@ -685,7 +685,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Moves the specified instances out of the standby state.

After you put the instances back in service, the desired capacity is incremented.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Moves the specified instances out of the standby state.

After you put the instances back in service, the desired capacity is incremented.

For more information, see Temporarily removing instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" }, "PutLifecycleHook":{ "name":"PutLifecycleHook", @@ -702,7 +702,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using the RecordLifecycleActionHeartbeat API call.

  5. If you finish before the timeout period ends, complete the lifecycle action using the CompleteLifecycleAction API call.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.

" + "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using the RecordLifecycleActionHeartbeat API call.

  5. If you finish before the timeout period ends, complete the lifecycle action using the CompleteLifecycleAction API call.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.
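Illustrative only (not part of the service model): a sketch of step 3 of the procedure above, creating a launch lifecycle hook with the AWS SDK for Java v2; the group and hook names are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.PutLifecycleHookRequest;

public class LaunchHookExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Pause newly launched instances in a wait state for up to five minutes
            // before they are put into service.
            autoScaling.putLifecycleHook(PutLifecycleHookRequest.builder()
                    .autoScalingGroupName("my-asg")                            // hypothetical group name
                    .lifecycleHookName("my-launch-hook")                       // hypothetical hook name
                    .lifecycleTransition("autoscaling:EC2_INSTANCE_LAUNCHING")
                    .heartbeatTimeout(300)                                     // seconds before DefaultResult applies
                    .defaultResult("CONTINUE")                                 // proceed if no completion is recorded
                    .build());
        }
    }
}
```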

" }, "PutNotificationConfiguration":{ "name":"PutNotificationConfiguration", @@ -716,7 +716,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting Amazon SNS Notifications When Your Auto Scaling Group Scales in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting Amazon SNS notifications when your Auto Scaling group scales in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -734,7 +734,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Creates or updates a scaling policy for an Auto Scaling group.

For more information about using scaling policies to scale your Auto Scaling group, see Target Tracking Scaling Policies and Step and Simple Scaling Policies in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates or updates a scaling policy for an Auto Scaling group.

For more information about using scaling policies to scale your Auto Scaling group, see Target tracking scaling policies and Step and simple scaling policies in the Amazon EC2 Auto Scaling User Guide.
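Illustrative only (not part of the service model): a sketch of PutScalingPolicy creating a target tracking policy with the AWS SDK for Java v2; the group and policy names are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.PredefinedMetricSpecification;
import software.amazon.awssdk.services.autoscaling.model.PutScalingPolicyRequest;
import software.amazon.awssdk.services.autoscaling.model.TargetTrackingConfiguration;

public class TargetTrackingExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Keep the group's average CPU utilization near 50 percent.
            autoScaling.putScalingPolicy(PutScalingPolicyRequest.builder()
                    .autoScalingGroupName("my-asg")             // hypothetical group name
                    .policyName("cpu-50-target-tracking")       // hypothetical policy name
                    .policyType("TargetTrackingScaling")
                    .targetTrackingConfiguration(TargetTrackingConfiguration.builder()
                            .predefinedMetricSpecification(PredefinedMetricSpecification.builder()
                                    .predefinedMetricType("ASGAverageCPUUtilization")
                                    .build())
                            .targetValue(50.0)
                            .build())
                    .build());
        }
    }
}
```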

" }, "PutScheduledUpdateGroupAction":{ "name":"PutScheduledUpdateGroupAction", @@ -748,7 +748,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

For more information, see Scheduled Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "RecordLifecycleActionHeartbeat":{ "name":"RecordLifecycleActionHeartbeat", @@ -764,7 +764,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling lifecycle in the Amazon EC2 Auto Scaling User Guide.
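Illustrative only (not part of the service model): a sketch of step 4 of the procedure above, recording a heartbeat for an instance-scoped lifecycle action with the AWS SDK for Java v2; the identifiers are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.RecordLifecycleActionHeartbeatRequest;

public class HeartbeatExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Extend the timeout for an in-flight lifecycle action on a specific instance.
            autoScaling.recordLifecycleActionHeartbeat(RecordLifecycleActionHeartbeatRequest.builder()
                    .autoScalingGroupName("my-asg")         // hypothetical group name
                    .lifecycleHookName("my-launch-hook")    // hypothetical hook name
                    .instanceId("i-0123456789abcdef0")      // hypothetical instance ID
                    .build());
        }
    }
}
```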

" }, "ResumeProcesses":{ "name":"ResumeProcesses", @@ -777,7 +777,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Resumes the specified suspended automatic scaling processes, or all suspended process, for the specified Auto Scaling group.

For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Resumes the specified suspended auto scaling processes, or all suspended processes, for the specified Auto Scaling group.

For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

" }, "SetDesiredCapacity":{ "name":"SetDesiredCapacity", @@ -790,7 +790,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the size of the specified Auto Scaling group.

If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

For more information, see Manual Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Sets the size of the specified Auto Scaling group.

If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide.
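Illustrative only (not part of the service model): a sketch of SetDesiredCapacity with the AWS SDK for Java v2; the group name and size are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.SetDesiredCapacityRequest;

public class ManualScaleExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Manually scale the group to three instances, honoring any cooldown
            // period left over from a previous simple scaling activity.
            autoScaling.setDesiredCapacity(SetDesiredCapacityRequest.builder()
                    .autoScalingGroupName("my-asg")   // hypothetical group name
                    .desiredCapacity(3)
                    .honorCooldown(true)
                    .build());
        }
    }
}
```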

" }, "SetInstanceHealth":{ "name":"SetInstanceHealth", @@ -802,7 +802,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the health status of the specified instance.

For more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Sets the health status of the specified instance.

For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

" }, "SetInstanceProtection":{ "name":"SetInstanceProtection", @@ -819,7 +819,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.
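Illustrative only (not part of the service model): a sketch of SetInstanceProtection with the AWS SDK for Java v2; the group name and instance ID are placeholders.

```java
import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
import software.amazon.awssdk.services.autoscaling.model.SetInstanceProtectionRequest;

public class ScaleInProtectionExample {
    public static void main(String[] args) {
        try (AutoScalingClient autoScaling = AutoScalingClient.create()) {
            // Protect one instance from termination during scale-in activities.
            autoScaling.setInstanceProtection(SetInstanceProtectionRequest.builder()
                    .autoScalingGroupName("my-asg")         // hypothetical group name
                    .instanceIds("i-0123456789abcdef0")     // hypothetical instance ID
                    .protectedFromScaleIn(true)
                    .build());
        }
    }
}
```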

" }, "StartInstanceRefresh":{ "name":"StartInstanceRefresh", @@ -850,7 +850,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Suspends the specified automatic scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

To resume processes that have been suspended, call the ResumeProcesses API.

" + "documentation":"

Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

To resume processes that have been suspended, call the ResumeProcesses API.

" }, "TerminateInstanceInAutoScalingGroup":{ "name":"TerminateInstanceInAutoScalingGroup", @@ -867,7 +867,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Terminates the specified instance and optionally adjusts the desired group size.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing Activities in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Terminates the specified instance and optionally adjusts the desired group size.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.

" }, "UpdateAutoScalingGroup":{ "name":"UpdateAutoScalingGroup", @@ -1069,7 +1069,7 @@ }, "TargetGroupARNs":{ "shape":"TargetGroupARNs", - "documentation":"

The Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target groups.

" + "documentation":"

The Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target groups. To get the ARN of a target group, use the Elastic Load Balancing DescribeTargetGroups API operation.

" } } }, @@ -1211,6 +1211,10 @@ "MaxInstanceLifetime":{ "shape":"MaxInstanceLifetime", "documentation":"

The maximum amount of time, in seconds, that an instance can be in service.

Valid Range: Minimum value of 0.

" + }, + "CapacityRebalance":{ + "shape":"CapacityRebalanceEnabled", + "documentation":"

Indicates whether Capacity Rebalancing is enabled.

" } }, "documentation":"

Describes an Auto Scaling group.

" @@ -1227,7 +1231,7 @@ "members":{ "AutoScalingGroupNames":{ "shape":"AutoScalingGroupNames", - "documentation":"

The names of the Auto Scaling groups. Each name can be a maximum of 1600 characters. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords parameter.

If you omit this parameter, all Auto Scaling groups are described.

" + "documentation":"

The names of the Auto Scaling groups. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords parameter.

If you omit this parameter, all Auto Scaling groups are described.

" }, "NextToken":{ "shape":"XmlString", @@ -1452,6 +1456,7 @@ } } }, + "CapacityRebalanceEnabled":{"type":"boolean"}, "ClassicLinkVPCSecurityGroups":{ "type":"list", "member":{"shape":"XmlStringMaxLen255"} @@ -1506,19 +1511,19 @@ }, "LaunchConfigurationName":{ "shape":"ResourceName", - "documentation":"

The name of the launch configuration to use when an instance is launched. To get the launch configuration name, use the DescribeLaunchConfigurations API operation. New launch configurations can be created with the CreateLaunchConfiguration API.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" + "documentation":"

The name of the launch configuration to use to launch instances.

Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId).

" }, "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

Parameters used to specify the launch template and version to use when an instance is launched.

For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.

You can alternatively associate a launch template to the Auto Scaling group by using the MixedInstancesPolicy parameter.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" + "documentation":"

Parameters used to specify the launch template and version to use to launch instances.

Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId).

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a launch template for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", - "documentation":"

An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.

The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity, but also the parameters that specify the instance configuration information—the launch template and instance types.

For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" + "documentation":"

An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.

The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the parameters that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" }, "InstanceId":{ "shape":"XmlStringMaxLen19", - "documentation":"

The ID of the instance used to create a launch configuration for the group. To get the instance ID, use the Amazon EC2 DescribeInstances API operation.

When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, except for the block device mapping.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" + "documentation":"

The ID of the instance used to base the launch configuration on. If specified, Amazon EC2 Auto Scaling uses the configuration values from the specified instance to create a new launch configuration. To get the instance ID, use the Amazon EC2 DescribeInstances API operation. For more information, see Creating an Auto Scaling group using an EC2 instance in the Amazon EC2 Auto Scaling User Guide.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -1530,63 +1535,67 @@ }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. It can scale beyond this capacity if you configure automatic scaling.

This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. It can scale beyond this capacity if you configure auto scaling. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.

" }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300.

This setting applies when using simple scaling policies, but not when using other scaling policies or scheduled scaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300. This setting applies when using simple scaling policies, but not when using other scaling policies or scheduled scaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", - "documentation":"

One or more Availability Zones for the group. This parameter is optional if you specify one or more subnets for VPCZoneIdentifier.

Conditional: If your account supports EC2-Classic and VPC, this parameter is required to launch instances into EC2-Classic.

" + "documentation":"

A list of Availability Zones where instances in the Auto Scaling group can be created. This parameter is optional if you specify one or more subnets for VPCZoneIdentifier.

Conditional: If your account supports EC2-Classic and VPC, this parameter is required to launch instances into EC2-Classic.

" }, "LoadBalancerNames":{ "shape":"LoadBalancerNames", - "documentation":"

A list of Classic Load Balancers associated with this Auto Scaling group. For Application Load Balancers and Network Load Balancers, specify a list of target groups using the TargetGroupARNs property instead.

For more information, see Using a Load Balancer with an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

A list of Classic Load Balancers associated with this Auto Scaling group. For Application Load Balancers, Network Load Balancers, and Gateway Load Balancers, specify the TargetGroupARNs property instead.

" }, "TargetGroupARNs":{ "shape":"TargetGroupARNs", - "documentation":"

The Amazon Resource Names (ARN) of the target groups to associate with the Auto Scaling group. Instances are registered as targets in a target group, and traffic is routed to the target group.

For more information, see Using a Load Balancer with an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The Amazon Resource Names (ARN) of the target groups to associate with the Auto Scaling group. Instances are registered as targets in a target group, and traffic is routed to the target group. For more information, see Elastic Load Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "HealthCheckType":{ "shape":"XmlStringMaxLen32", - "documentation":"

The service to use for the health checks. The valid values are EC2 and ELB. The default value is EC2. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.

For more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The service to use for the health checks. The valid values are EC2 (default) and ELB. If you configure an Auto Scaling group to use load balancer (ELB) health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks. For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.

" }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", - "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default value is 0.

For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.

Required if you are adding an ELB health check.

" + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default value is 0. For more information, see Health check grace period in the Amazon EC2 Auto Scaling User Guide.

Conditional: Required if you are adding an ELB health check.

" }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the placement group into which to launch your instances, if any. A placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a placement group. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The name of an existing placement group into which to launch your instances, if any. A placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a placement group. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen2047", - "documentation":"

A comma-separated list of subnet IDs for your virtual private cloud (VPC).

If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify for this parameter must reside in those Availability Zones.

Conditional: If your account supports EC2-Classic and VPC, this parameter is required to launch instances into a VPC.

" + "documentation":"

A comma-separated list of subnet IDs for a virtual private cloud (VPC) where instances in the Auto Scaling group can be created. If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify for this parameter must reside in those Availability Zones.

Conditional: If your account supports EC2-Classic and VPC, this parameter is required to launch instances into a VPC.

" }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

One or more termination policies used to select the instance to terminate. These policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see Controlling which Auto Scaling instances terminate during scale in in the Amazon EC2 Auto Scaling User Guide.

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in.

For more information about preventing instances from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

" + }, + "CapacityRebalance":{ + "shape":"CapacityRebalanceEnabled", + "documentation":"

Indicates whether Capacity Rebalancing is enabled. If it is not specified, Capacity Rebalancing is disabled. When you turn on Capacity Rebalancing, Amazon EC2 Auto Scaling attempts to launch a Spot Instance whenever Amazon EC2 notifies you that a Spot Instance is at an elevated risk of interruption. After launching a new instance, it then terminates an old instance. For more information, see Amazon EC2 Auto Scaling Capacity Rebalancing in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookSpecificationList":{ "shape":"LifecycleHookSpecifications", - "documentation":"

One or more lifecycle hooks.

" + "documentation":"

One or more lifecycle hooks for the group, which specify actions to perform when Amazon EC2 Auto Scaling launches or terminates instances.

" }, "Tags":{ "shape":"Tags", - "documentation":"

One or more tags. You can tag your Auto Scaling group and propagate the tags to the Amazon EC2 instances it launches.

Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS volumes, specify the tags in a launch template but use caution. If the launch template specifies an instance tag with a key that is also specified for the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that instance tag with the value specified by the Auto Scaling group.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

One or more tags. You can tag your Auto Scaling group and propagate the tags to the Amazon EC2 instances it launches. Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS volumes, specify the tags in a launch template but use caution. If the launch template specifies an instance tag with a key that is also specified for the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that instance tag with the value specified by the Auto Scaling group. For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.

" }, "ServiceLinkedRoleARN":{ "shape":"ResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Amazon EC2 Auto Scaling uses a service-linked role named AWSServiceRoleForAutoScaling, which it creates if it does not exist. For more information, see Service-Linked Roles in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Amazon EC2 Auto Scaling uses a service-linked role named AWSServiceRoleForAutoScaling, which it creates if it does not exist. For more information, see Service-linked roles in the Amazon EC2 Auto Scaling User Guide.

" }, "MaxInstanceLifetime":{ "shape":"MaxInstanceLifetime", - "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null.

This parameter is optional, but if you specify a value for it, you must specify a value of at least 604,800 seconds (7 days). To clear a previously set value, specify a new value of 0.

For more information, see Replacing Auto Scaling Instances Based on Maximum Instance Lifetime in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 0.

" + "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see Replacing Auto Scaling instances based on maximum instance lifetime in the Amazon EC2 Auto Scaling User Guide.

" } } }, @@ -1612,19 +1621,19 @@ }, "ClassicLinkVPCId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

This parameter can only be used if you are launching EC2-Classic instances.

" + "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

This parameter can only be used if you are launching EC2-Classic instances.

" }, "ClassicLinkVPCSecurityGroups":{ "shape":"ClassicLinkVPCSecurityGroups", - "documentation":"

The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify the ClassicLinkVPCId parameter, you must specify this parameter.

" + "documentation":"

The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify the ClassicLinkVPCId parameter, you must specify this parameter.

" }, "UserData":{ "shape":"XmlStringUserData", - "documentation":"

The Base64-encoded user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The Base64-encoded user data to make available to the launched EC2 instances. For more information, see Instance metadata and user data in the Amazon EC2 User Guide for Linux Instances.

" }, "InstanceId":{ "shape":"XmlStringMaxLen19", - "documentation":"

The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.

To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

For more information, see Create a Launch Configuration Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.

If you do not specify InstanceId, you must specify both ImageId and InstanceType.

" + "documentation":"

The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping.

To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request.

For more information, see Creating a launch configuration using an EC2 instance in the Amazon EC2 Auto Scaling User Guide.

If you do not specify InstanceId, you must specify both ImageId and InstanceType.

" }, "InstanceType":{ "shape":"XmlStringMaxLen255", @@ -1644,15 +1653,15 @@ }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", - "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

The default value is true (enabled).

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

The default value is true (enabled).

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "SpotPrice":{ "shape":"SpotPrice", - "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

When you change your maximum price by creating a new launch configuration, running instances will continue to run as long as the maximum price for those running instances is higher than the current Spot price.

" + "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Requesting Spot Instances in the Amazon EC2 Auto Scaling User Guide.

When you change your maximum price by creating a new launch configuration, running instances will continue to run as long as the maximum price for those running instances is higher than the current Spot price.

" }, "IamInstanceProfile":{ "shape":"XmlStringMaxLen1600", - "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM role for applications that run on Amazon EC2 instances in the Amazon EC2 Auto Scaling User Guide.

" }, "EbsOptimized":{ "shape":"EbsOptimized", @@ -1660,15 +1669,15 @@ }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", - "documentation":"

For Auto Scaling groups that are running in a virtual private cloud (VPC), specifies whether to assign a public IP address to the group's instances. If you specify true, each instance in the Auto Scaling group receives a unique public IP address. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify this parameter, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

If the instance is launched into a default subnet, the default is to assign a public IP address, unless you disabled the option to assign a public IP address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address, unless you enabled the option to assign a public IP address on the subnet.

" + "documentation":"

For Auto Scaling groups that are running in a virtual private cloud (VPC), specifies whether to assign a public IP address to the group's instances. If you specify true, each instance in the Auto Scaling group receives a unique public IP address. For more information, see Launching Auto Scaling instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify this parameter, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

If the instance is launched into a default subnet, the default is to assign a public IP address, unless you disabled the option to assign a public IP address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address, unless you enabled the option to assign a public IP address on the subnet.

" }, "PlacementTenancy":{ "shape":"XmlStringMaxLen64", - "documentation":"

The tenancy of the instance. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this parameter to dedicated.

If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.

Valid Values: default | dedicated

" + "documentation":"

The tenancy of the instance. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this parameter to dedicated.

If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Valid Values: default | dedicated

" }, "MetadataOptions":{ "shape":"InstanceMetadataOptions", - "documentation":"

The metadata options for the instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide.

" } } }, @@ -2251,7 +2260,7 @@ }, "Encrypted":{ "shape":"BlockDeviceEbsEncrypted", - "documentation":"

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.

Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.

For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.

Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.

For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK key policy for use with encrypted volumes in the Amazon EC2 Auto Scaling User Guide.

" } }, "documentation":"

Describes information used to set up an Amazon EBS volume specified in a block device mapping.

" @@ -2341,7 +2350,7 @@ }, "HonorCooldown":{ "shape":"HonorCooldown", - "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.

Valid only if the policy type is SimpleScaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.

Valid only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "MetricValue":{ "shape":"MetricScale", @@ -2411,7 +2420,7 @@ "documentation":"

One or more filter values. Filter values are case-sensitive.

" } }, - "documentation":"

Describes a filter that is used to return a more specific list of results when describing tags.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes a filter that is used to return a more specific list of results when describing tags.

For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.

" }, "Filters":{ "type":"list", @@ -2510,7 +2519,7 @@ "documentation":"

This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

If you specify a value of disabled, you will not be able to access your instance metadata.
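
As a rough illustration, these metadata options can be built with the AWS SDK for Java v2 builders generated from this model. This is only a sketch; the httpEndpoint builder method and the enabled value follow from the description above and should be treated as assumptions:

    import software.amazon.awssdk.services.autoscaling.model.InstanceMetadataOptions;

    // Keep the instance metadata endpoint reachable for instances launched by the group.
    // Specifying "disabled" instead would block all access to instance metadata.
    InstanceMetadataOptions metadataOptions = InstanceMetadataOptions.builder()
            .httpEndpoint("enabled")
            .build();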

" } }, - "documentation":"

The metadata options for the instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide.

" }, "InstanceMonitoring":{ "type":"structure", @@ -2602,30 +2611,30 @@ "members":{ "OnDemandAllocationStrategy":{ "shape":"XmlString", - "documentation":"

Indicates how to allocate instance types to fulfill On-Demand capacity.

The only valid value is prioritized, which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.

" + "documentation":"

Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the overrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.

" }, "OnDemandBaseCapacity":{ "shape":"OnDemandBaseCapacity", - "documentation":"

The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.

Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.

An update to this setting means a gradual replacement of instances to maintain the specified number of On-Demand Instances for your base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.

" + "documentation":"

The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales. Defaults to 0 if not specified. If you specify weights for the instance types in the overrides, set the value of OnDemandBaseCapacity in terms of the number of capacity units, and not the number of instances.

" }, "OnDemandPercentageAboveBaseCapacity":{ "shape":"OnDemandPercentageAboveBaseCapacity", - "documentation":"

Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.

Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.

An update to this setting means a gradual replacement of instances to maintain the percentage of On-Demand Instances for your additional capacity above the base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.

Valid Range: Minimum value of 0. Maximum value of 100.

" + "documentation":"

Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity. Expressed as a number (for example, 20 specifies 20% On-Demand Instances, 80% Spot Instances). Defaults to 100 if not specified. If set to 100, only On-Demand Instances are provisioned.

" }, "SpotAllocationStrategy":{ "shape":"XmlString", - "documentation":"

Indicates how to allocate instances across Spot Instance pools.

If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized, the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.

The default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price. The default Spot allocation strategy for the AWS Management Console is capacity-optimized.

Valid values: lowest-price | capacity-optimized

" + "documentation":"

Indicates how to allocate instances across Spot Instance pools. If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.

" }, "SpotInstancePools":{ "shape":"SpotInstancePools", - "documentation":"

The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate. Default if not set is 2.

Used only when the Spot allocation strategy is lowest-price.

Valid Range: Minimum value of 1. Maximum value of 20.

" + "documentation":"

The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the overrides. Valid only when the Spot allocation strategy is lowest-price. Value must be in the range of 1 to 20. Defaults to 2 if not specified.

" }, "SpotMaxPrice":{ "shape":"MixedInstanceSpotPrice", - "documentation":"

The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.

To remove a value that you previously set, include the parameter but leave the value blank.

" + "documentation":"

The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value at its default (empty), Amazon EC2 Auto Scaling uses the On-Demand price as the maximum Spot price. To remove a value that you previously set, include the property but specify an empty string (\"\") for the value.

" } }, - "documentation":"

Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
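
To make these members concrete, here is a minimal sketch using the AWS SDK for Java v2, assuming the builder methods that the SDK generates from the member names in this model; the template name, instance types, and capacity values are illustrative placeholders:

    import software.amazon.awssdk.services.autoscaling.model.InstancesDistribution;
    import software.amazon.awssdk.services.autoscaling.model.LaunchTemplate;
    import software.amazon.awssdk.services.autoscaling.model.LaunchTemplateOverrides;
    import software.amazon.awssdk.services.autoscaling.model.LaunchTemplateSpecification;
    import software.amazon.awssdk.services.autoscaling.model.MixedInstancesPolicy;

    // Keep 2 capacity units On-Demand, split additional capacity 20% On-Demand / 80% Spot,
    // and let Amazon EC2 Auto Scaling choose Spot pools based on available capacity.
    InstancesDistribution distribution = InstancesDistribution.builder()
            .onDemandBaseCapacity(2)
            .onDemandPercentageAboveBaseCapacity(20)
            .spotAllocationStrategy("capacity-optimized")
            .build();

    MixedInstancesPolicy policy = MixedInstancesPolicy.builder()
            .launchTemplate(LaunchTemplate.builder()
                    .launchTemplateSpecification(LaunchTemplateSpecification.builder()
                            .launchTemplateName("my-launch-template") // placeholder name
                            .version("$Default")
                            .build())
                    .overrides(
                            LaunchTemplateOverrides.builder().instanceType("m5.large").weightedCapacity("1").build(),
                            LaunchTemplateOverrides.builder().instanceType("m5.xlarge").weightedCapacity("2").build())
                    .build())
            .instancesDistribution(distribution)
            .build();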

" }, "InstancesToUpdate":{ "type":"integer", @@ -2671,7 +2680,7 @@ }, "ImageId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.

For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.

" }, "KeyName":{ "shape":"XmlStringMaxLen255", @@ -2679,19 +2688,19 @@ }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

A list that contains the security groups to assign to the instances in the Auto Scaling group.

For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

A list that contains the security groups to assign to the instances in the Auto Scaling group. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" }, "ClassicLinkVPCId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "ClassicLinkVPCSecurityGroups":{ "shape":"ClassicLinkVPCSecurityGroups", - "documentation":"

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "UserData":{ "shape":"XmlStringUserData", - "documentation":"

The Base64-encoded user data to make available to the launched EC2 instances.

For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The Base64-encoded user data to make available to the launched EC2 instances. For more information, see Instance metadata and user data in the Amazon EC2 User Guide for Linux Instances.

" }, "InstanceType":{ "shape":"XmlStringMaxLen255", @@ -2707,19 +2716,19 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappings", - "documentation":"

A block device mapping, which specifies the block devices for the instance.

For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

A block device mapping, which specifies the block devices for the instance. For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances.

" }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", - "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "SpotPrice":{ "shape":"SpotPrice", - "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price.

For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Requesting Spot Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "IamInstanceProfile":{ "shape":"XmlStringMaxLen1600", - "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role. For more information, see IAM role for applications that run on Amazon EC2 instances in the Amazon EC2 Auto Scaling User Guide.

" }, "CreatedTime":{ "shape":"TimestampType", @@ -2727,19 +2736,19 @@ }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false).

For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", - "documentation":"

For Auto Scaling groups that are running in a VPC, specifies whether to assign a public IP address to the group's instances.

For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

For Auto Scaling groups that are running in a VPC, specifies whether to assign a public IP address to the group's instances. For more information, see Launching Auto Scaling instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

" }, "PlacementTenancy":{ "shape":"XmlStringMaxLen64", - "documentation":"

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "MetadataOptions":{ "shape":"InstanceMetadataOptions", - "documentation":"

The metadata options for the instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide.

" } }, "documentation":"

Describes a launch configuration.

" @@ -2798,52 +2807,56 @@ "members":{ "LaunchTemplateSpecification":{ "shape":"LaunchTemplateSpecification", - "documentation":"

The launch template to use. You must specify either the launch template ID or launch template name in the request.

" + "documentation":"

The launch template to use.

" }, "Overrides":{ "shape":"Overrides", - "documentation":"

Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You can specify between 1 and 20 instance types.

If not provided, Amazon EC2 Auto Scaling will use the instance type specified in the launch template to launch instances.

" + "documentation":"

Any parameters that you specify override the same parameters in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.

" } }, - "documentation":"

Describes a launch template and overrides.

The overrides are used to override the instance type specified by the launch template with multiple instance types that can be used to launch On-Demand Instances and Spot Instances.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes a launch template and overrides.

You specify these parameters as part of a mixed instances policy.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" }, "LaunchTemplateName":{ "type":"string", "max":128, "min":3, - "pattern":"[a-zA-Z0-9\\(\\)\\.-/_]+" + "pattern":"[a-zA-Z0-9\\(\\)\\.\\-/_]+" }, "LaunchTemplateOverrides":{ "type":"structure", "members":{ "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The instance type. You must use an instance type that is supported in your requested Region and Availability Zones.

For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The instance type, such as m3.xlarge. You must use an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon Elastic Compute Cloud User Guide.

" }, "WeightedCapacity":{ "shape":"XmlStringMaxLen32", - "documentation":"

The number of capacity units, which gives the instance type a proportional weight to other instance types. For example, larger instance types are generally weighted more than smaller instance types. These are the same units that you chose to set the desired capacity in terms of instances, or a performance attribute such as vCPUs, memory, or I/O.

For more information, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 1. Maximum value of 999.

" + "documentation":"

The number of capacity units provided by the specified instance type in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is provisioned, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling provisions instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the desired capacity is exceeded by 3 units. For more information, see Instance weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1 to 999.

" + }, + "LaunchTemplateSpecification":{ + "shape":"LaunchTemplateSpecification", + "documentation":"

Provides the launch template to be used when launching the instance type. For example, some instance types might require a launch template with a different AMI. If not provided, Amazon EC2 Auto Scaling uses the launch template that's defined for your mixed instances policy. For more information, see Specifying a different launch template for an instance type in the Amazon EC2 Auto Scaling User Guide.

" } }, - "documentation":"

Describes an override for a launch template. Currently, the only supported override is instance type.

The maximum number of instance type overrides that can be associated with an Auto Scaling group is 20.

" + "documentation":"

Describes an override for a launch template. The maximum number of instance types that can be associated with an Auto Scaling group is 20. For more information, see Configuring overrides in the Amazon EC2 Auto Scaling User Guide.

" }, "LaunchTemplateSpecification":{ "type":"structure", "members":{ "LaunchTemplateId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.

You must specify either a template ID or a template name.

" + "documentation":"

The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.

Conditional: You must specify either a LaunchTemplateId or a LaunchTemplateName.

" }, "LaunchTemplateName":{ "shape":"LaunchTemplateName", - "documentation":"

The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.

You must specify either a template ID or a template name.

" + "documentation":"

The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.

Conditional: You must specify either a LaunchTemplateId or a LaunchTemplateName.

" }, "Version":{ "shape":"XmlStringMaxLen255", - "documentation":"

The version number, $Latest, or $Default. To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API.

If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" + "documentation":"

The version number, $Latest, or $Default. To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API. If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" } }, - "documentation":"

Describes the Amazon EC2 launch template and the launch template version that can be used by an Auto Scaling group to configure Amazon EC2 instances.

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a Launch Template for an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the Amazon EC2 launch template and the launch template version that can be used by an Auto Scaling group to configure Amazon EC2 instances.

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a launch template for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
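
For example, a group can be pointed at a specific launch template version through UpdateAutoScalingGroup. This is only a sketch, assuming the standard AWS SDK for Java v2 builders for this model; the group name and template ID are placeholders:

    import software.amazon.awssdk.services.autoscaling.AutoScalingClient;
    import software.amazon.awssdk.services.autoscaling.model.LaunchTemplateSpecification;
    import software.amazon.awssdk.services.autoscaling.model.UpdateAutoScalingGroupRequest;

    AutoScalingClient autoScaling = AutoScalingClient.create();

    // Specify either launchTemplateId or launchTemplateName, not both.
    autoScaling.updateAutoScalingGroup(UpdateAutoScalingGroupRequest.builder()
            .autoScalingGroupName("my-asg")                   // placeholder group name
            .launchTemplate(LaunchTemplateSpecification.builder()
                    .launchTemplateId("lt-0123456789abcdef0") // placeholder template ID
                    .version("$Latest")
                    .build())
            .build());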

" }, "LifecycleActionResult":{"type":"string"}, "LifecycleActionToken":{ @@ -2934,7 +2947,7 @@ "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target, for example, an Amazon SNS topic or an Amazon SQS queue.

" } }, - "documentation":"

Describes information used to specify a lifecycle hook for an Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for creating a lifecycle hook for an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes information used to specify a lifecycle hook for an Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for creating a lifecycle hook for an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookSpecifications":{ "type":"list", @@ -3117,14 +3130,14 @@ "members":{ "LaunchTemplate":{ "shape":"LaunchTemplate", - "documentation":"

The launch template and instance types (overrides).

Required when creating a mixed instances policy.

" + "documentation":"

Specifies the launch template to use and optionally the instance types (overrides) that are used to provision EC2 instances to fulfill On-Demand and Spot capacities. Required when creating a mixed instances policy.

" }, "InstancesDistribution":{ "shape":"InstancesDistribution", - "documentation":"

The instances distribution to use.

If you leave this parameter unspecified, the value for each parameter in InstancesDistribution uses a default value.

" + "documentation":"

Specifies the instances distribution. If not provided, the value for each parameter in InstancesDistribution uses a default value.

" } }, - "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or launch template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup.

" + "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or launch template.

" }, "MonitoringEnabled":{"type":"boolean"}, "NoDevice":{"type":"boolean"}, @@ -3210,7 +3223,7 @@ }, "ResourceLabel":{ "shape":"XmlStringMaxLen1023", - "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group.

Elastic Load Balancing sends data about your load balancers to Amazon CloudWatch. CloudWatch collects the data and specifies the format to use to access the data. The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where

  • app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and

  • targetgroup/target-group-name/target-group-id is the final portion of the target group ARN.

To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use the DescribeTargetGroups API operation.

" + "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group.

You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

  • app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN

  • targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.

This is an example: app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d.

To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use the DescribeTargetGroups API operation.
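
Building the resource label is plain string manipulation on the two ARNs. A minimal sketch in Java, using the example identifiers from the text above (the Region and account ID in these ARNs are made-up placeholders):

    // Final portion of the load balancer ARN (everything after "loadbalancer/") plus the
    // final portion of the target group ARN (from "targetgroup/" onward), joined by "/".
    String albArn = "arn:aws:elasticloadbalancing:us-west-2:123456789012:"
            + "loadbalancer/app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa";
    String tgArn = "arn:aws:elasticloadbalancing:us-west-2:123456789012:"
            + "targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d";

    String lbPortion = albArn.substring(albArn.indexOf("loadbalancer/") + "loadbalancer/".length());
    String tgPortion = tgArn.substring(tgArn.indexOf("targetgroup/"));
    String resourceLabel = lbPortion + "/" + tgPortion;
    // app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d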

" } }, "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Amazon EC2 Auto Scaling.

" @@ -3228,7 +3241,7 @@ "documentation":"

One of the following processes:

  • Launch

  • Terminate

  • AddToLoadBalancer

  • AlarmNotification

  • AZRebalance

  • HealthCheck

  • InstanceRefresh

  • ReplaceUnhealthy

  • ScheduledActions

" } }, - "documentation":"

Describes a process type.

For more information, see Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes a process type.

For more information, see Scaling processes in the Amazon EC2 Auto Scaling User Guide.

" }, "Processes":{ "type":"list", @@ -3335,7 +3348,7 @@ }, "AdjustmentType":{ "shape":"XmlStringMaxLen255", - "documentation":"

Specifies how the scaling adjustment is interpreted (for example, an absolute number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

Required if the policy type is StepScaling or SimpleScaling. For more information, see Scaling Adjustment Types in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Specifies how the scaling adjustment is interpreted (for example, an absolute number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

Required if the policy type is StepScaling or SimpleScaling. For more information, see Scaling adjustment types in the Amazon EC2 Auto Scaling User Guide.

" }, "MinAdjustmentStep":{ "shape":"MinAdjustmentStep", @@ -3343,7 +3356,7 @@ }, "MinAdjustmentMagnitude":{ "shape":"MinAdjustmentMagnitude", - "documentation":"

The minimum value to scale by when the adjustment type is PercentChangeInCapacity. For example, suppose that you create a step scaling policy to scale out an Auto Scaling group by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances.

Valid only if the policy type is StepScaling or SimpleScaling. For more information, see Scaling Adjustment Types in the Amazon EC2 Auto Scaling User Guide.

Some Auto Scaling groups use instance weights. In this case, set the MinAdjustmentMagnitude to a value that is at least as large as your largest instance weight.

" + "documentation":"

The minimum value to scale by when the adjustment type is PercentChangeInCapacity. For example, suppose that you create a step scaling policy to scale out an Auto Scaling group by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances.

Valid only if the policy type is StepScaling or SimpleScaling. For more information, see Scaling adjustment types in the Amazon EC2 Auto Scaling User Guide.

Some Auto Scaling groups use instance weights. In this case, set the MinAdjustmentMagnitude to a value that is at least as large as your largest instance weight.

" }, "ScalingAdjustment":{ "shape":"PolicyIncrement", @@ -3351,7 +3364,7 @@ }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

The duration of the policy's cooldown period, in seconds. When a cooldown period is specified here, it overrides the default cooldown period defined for the Auto Scaling group.

Valid only if the policy type is SimpleScaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The duration of the policy's cooldown period, in seconds. When a cooldown period is specified here, it overrides the default cooldown period defined for the Auto Scaling group.

Valid only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "MetricAggregationType":{ "shape":"XmlStringMaxLen32", @@ -3371,7 +3384,7 @@ }, "Enabled":{ "shape":"ScalingPolicyEnabled", - "documentation":"

Indicates whether the scaling policy is enabled or disabled. The default is enabled. For more information, see Disabling a Scaling Policy for an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether the scaling policy is enabled or disabled. The default is enabled. For more information, see Disabling a scaling policy for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

" } } }, @@ -3564,7 +3577,7 @@ }, "PolicyType":{ "shape":"XmlStringMaxLen64", - "documentation":"

One of the following policy types:

  • TargetTrackingScaling

  • StepScaling

  • SimpleScaling (default)

For more information, see Target Tracking Scaling Policies and Step and Simple Scaling Policies in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

One of the following policy types:

  • TargetTrackingScaling

  • StepScaling

  • SimpleScaling (default)

For more information, see Target tracking scaling policies and Step and simple scaling policies in the Amazon EC2 Auto Scaling User Guide.

" }, "AdjustmentType":{ "shape":"XmlStringMaxLen255", @@ -3808,7 +3821,7 @@ "members":{ "InstanceIds":{ "shape":"InstanceIds", - "documentation":"

One or more instance IDs.

" + "documentation":"

One or more instance IDs. You can specify up to 50 instances.

" }, "AutoScalingGroupName":{ "shape":"ResourceName", @@ -3872,7 +3885,7 @@ "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

" } }, - "documentation":"

Describes information used to create a step adjustment for a step scaling policy.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

  • To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

  • To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

There are a few rules for the step adjustments for your step policy:

  • The ranges of your step adjustments can't overlap or have a gap.

  • At most, one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

  • At most, one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

  • The upper and lower bound can't be null in the same step adjustment.

For more information, see Step Adjustments in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes information used to create a step adjustment for a step scaling policy.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

  • To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

  • To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

There are a few rules for the step adjustments for your step policy:

  • The ranges of your step adjustments can't overlap or have a gap.

  • At most, one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

  • At most, one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

  • The upper and lower bound can't be null in the same step adjustment.

For more information, see Step adjustments in the Amazon EC2 Auto Scaling User Guide.
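
As a sketch of the first example above (a breach threshold of 50, scaling out), two contiguous step adjustments could look like the following in the AWS SDK for Java v2. The MetricIntervalLowerBound and MetricIntervalUpperBound member names are assumed from the bounds described above, and the adjustment sizes are illustrative:

    import java.util.List;
    import software.amazon.awssdk.services.autoscaling.model.StepAdjustment;

    // Add 1 instance while the metric is in [50, 60), and 2 instances at 60 or above.
    // Bounds are relative to the alarm threshold of 50; the last step leaves the upper
    // bound null, as the rules above require.
    List<StepAdjustment> steps = List.of(
            StepAdjustment.builder()
                    .metricIntervalLowerBound(0.0)
                    .metricIntervalUpperBound(10.0)
                    .scalingAdjustment(1)
                    .build(),
            StepAdjustment.builder()
                    .metricIntervalLowerBound(10.0)
                    .scalingAdjustment(2)
                    .build());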

" }, "StepAdjustments":{ "type":"list", @@ -3890,7 +3903,7 @@ "documentation":"

The reason that the process was suspended.

" } }, - "documentation":"

Describes an automatic scaling process that has been suspended.

For more information, see Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes an auto scaling process that has been suspended.

For more information, see Scaling processes in the Amazon EC2 Auto Scaling User Guide.

" }, "SuspendedProcesses":{ "type":"list", @@ -4045,11 +4058,11 @@ }, "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

The launch template and version to use to specify the updates. If you specify LaunchTemplate in your update request, you can't specify LaunchConfigurationName or MixedInstancesPolicy.

For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.

" + "documentation":"

The launch template and version to use to specify the updates. If you specify LaunchTemplate in your update request, you can't specify LaunchConfigurationName or MixedInstancesPolicy.

" }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", - "documentation":"

An embedded object that specifies a mixed instances policy.

In your call to UpdateAutoScalingGroup, you can make changes to the policy that is specified. All optional parameters are left unchanged if not specified.

For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional parameters are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -4061,11 +4074,11 @@ }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after this operation completes and the capacity it attempts to maintain.

This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after this operation completes and the capacity it attempts to maintain. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

" }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300.

This setting applies when using simple scaling policies, but not when using other scaling policies or scheduled scaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300. This setting applies when using simple scaling policies, but not when using other scaling policies or scheduled scaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -4077,31 +4090,35 @@ }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", - "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default value is 0.

For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.

Required if you are adding an ELB health check.

" + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default value is 0. For more information, see Health check grace period in the Amazon EC2 Auto Scaling User Guide.

Conditional: Required if you are adding an ELB health check.

" }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the placement group into which to launch your instances, if any. A placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a placement group. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The name of an existing placement group into which to launch your instances, if any. A placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a placement group. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen2047", - "documentation":"

A comma-separated list of subnet IDs for virtual private cloud (VPC).

If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify for this parameter must reside in those Availability Zones.

" + "documentation":"

A comma-separated list of subnet IDs for a virtual private cloud (VPC). If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify for this parameter must reside in those Availability Zones.

" }, "TerminationPolicies":{ "shape":"TerminationPolicies", - "documentation":"

A standalone termination policy or a list of termination policies used to select the instance to terminate. The policies are executed in the order that they are listed.

For more information, see Controlling Which Instances Auto Scaling Terminates During Scale In in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

A policy or a list of policies that are used to select the instances to terminate. The policies are executed in the order that you list them. For more information, see Controlling which Auto Scaling instances terminate during scale in in the Amazon EC2 Auto Scaling User Guide.

" }, "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", - "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in.

For more information about preventing instances from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

" }, "ServiceLinkedRoleARN":{ "shape":"ResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. For more information, see Service-Linked Roles in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. For more information, see Service-linked roles in the Amazon EC2 Auto Scaling User Guide.

" }, "MaxInstanceLifetime":{ "shape":"MaxInstanceLifetime", - "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null.

This parameter is optional, but if you specify a value for it, you must specify a value of at least 604,800 seconds (7 days). To clear a previously set value, specify a new value of 0.

For more information, see Replacing Auto Scaling Instances Based on Maximum Instance Lifetime in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 0.

" + "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). To clear a previously set value, specify a new value of 0. For more information, see Replacing Auto Scaling instances based on maximum instance lifetime in the Amazon EC2 Auto Scaling User Guide.

" + }, + "CapacityRebalance":{ + "shape":"CapacityRebalanceEnabled", + "documentation":"

Enables or disables Capacity Rebalancing. For more information, see Amazon EC2 Auto Scaling Capacity Rebalancing in the Amazon EC2 Auto Scaling User Guide.

" } } }, diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 2c244b5f5fc0..0fd2a4599e24 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 7a2d0c2e290a..916215efad5d 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index f561ab3413f6..bf498a9a634b 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -214,6 +214,19 @@ "documentation":"

Returns metadata associated with creating a copy of a resource.

", "idempotent":true }, + "DescribeGlobalSettings":{ + "name":"DescribeGlobalSettings", + "http":{ + "method":"GET", + "requestUri":"/global-settings" + }, + "input":{"shape":"DescribeGlobalSettingsInput"}, + "output":{"shape":"DescribeGlobalSettingsOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

The current feature settings for the AWS Account.

" + }, "DescribeProtectedResource":{ "name":"DescribeProtectedResource", "http":{ @@ -259,7 +272,7 @@ "errors":[ {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns the current service opt-in settings for the Region. If the service has a value set to true, AWS Backup tries to protect that service's resources in this Region, when included in an on-demand backup or scheduled backup plan. If the value is set to false for a service, AWS Backup does not try to protect that service's resources in this Region.

" + "documentation":"

Returns the current service opt-in settings for the Region. If service-opt-in is enabled for a service, AWS Backup tries to protect that service's resources in this Region, when the resource is included in an on-demand backup or scheduled backup plan. Otherwise, AWS Backup does not try to protect that service's resources in this Region.

" }, "DescribeRestoreJob":{ "name":"DescribeRestoreJob", @@ -665,6 +678,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, + {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], @@ -703,7 +717,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Recovers the saved resource identified by an Amazon Resource Name (ARN).

If the resource ARN is included in the request, then the last complete backup of that resource is recovered. If the ARN of a recovery point is supplied, then that recovery point is restored.

", + "documentation":"

Recovers the saved resource identified by an Amazon Resource Name (ARN).

", "idempotent":true }, "StopBackupJob":{ @@ -772,6 +786,21 @@ "documentation":"

Updates an existing backup plan identified by its backupPlanId with the input document in JSON format. The new version is uniquely identified by a VersionId.

", "idempotent":true }, + "UpdateGlobalSettings":{ + "name":"UpdateGlobalSettings", + "http":{ + "method":"PUT", + "requestUri":"/global-settings" + }, + "input":{"shape":"UpdateGlobalSettingsInput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Updates the current global settings for the AWS Account. Use the DescribeGlobalSettings API to determine the current settings.

" + }, "UpdateRecoveryPointLifecycle":{ "name":"UpdateRecoveryPointLifecycle", "http":{ @@ -801,7 +830,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Updates the current service opt-in settings for the Region. If the service has a value set to true, AWS Backup tries to protect that service's resources in this Region, when included in an on-demand backup or scheduled backup plan. If the value is set to false for a service, AWS Backup does not try to protect that service's resources in this Region.

" + "documentation":"

Updates the current service opt-in settings for the Region. If service-opt-in is enabled for a service, AWS Backup tries to protect that service's resources in this Region, when the resource is included in an on-demand backup or scheduled backup plan. Otherwise, AWS Backup does not try to protect that service's resources in this Region. Use the DescribeRegionSettings API to determine the resource types that are supported.

" } }, "shapes":{ @@ -819,7 +848,7 @@ }, "BackupOptions":{ "shape":"BackupOptions", - "documentation":"

Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs.

Valid value: \"WindowsVSS”:“enabled\". If enabled, creates a VSS Windows backup; otherwise, creates a regular backup.

If you specify an invalid option, you get an InvalidParameterValueException exception.

For more information about Windows VSS backups, see Creating a VSS-Enabled Windows Backup.

" + "documentation":"

Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs.

Valid values:

Set to \"WindowsVSS\":\"enabled\" to enable the WindowsVSS backup option and create a VSS Windows backup.

Set to \"WindowsVSS\":\"disabled\" to create a regular backup. The WindowsVSS option is not enabled by default.

If you specify an invalid option, you get an InvalidParameterValueException exception.

For more information about Windows VSS backups, see Creating a VSS-Enabled Windows Backup.

" } }, "documentation":"

A list of backup options for each resource type.

" @@ -930,7 +959,7 @@ }, "BackupOptions":{ "shape":"BackupOptions", - "documentation":"

Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs.

Valid value: \"WindowsVSS”:“enabled\". If enabled, creates a VSS Windows backup; otherwise, creates a regular backup. If you specify an invalid option, you get an InvalidParameterValueException exception.

" + "documentation":"

Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs.

Valid values: Set to \"WindowsVSS”:“enabled\" to enable WindowsVSS backup option and create a VSS Windows backup. Set to “WindowsVSS”:”disabled” to create a regular backup. If you specify an invalid option, you get an InvalidParameterValueException exception.

" }, "BackupType":{ "shape":"string", @@ -1099,7 +1128,7 @@ }, "ScheduleExpression":{ "shape":"CronExpression", - "documentation":"

A CRON expression specifying when AWS Backup initiates a backup job.

" + "documentation":"

A CRON expression specifying when AWS Backup initiates a backup job. For more information about cron expressions, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide. Prior to specifying a value for this parameter, we recommend testing your cron expression using one of the many available cron generator and testing tools.
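
For instance, a daily rule could use the six-field cron syntax that CloudWatch Events rules accept. A hedged sketch with the AWS SDK for Java v2 follows; the rule and vault names are placeholders, and the BackupRuleInput builder methods are assumed from the model's member names:

    import software.amazon.awssdk.services.backup.model.BackupRuleInput;

    // Run the backup rule every day at 05:00 UTC.
    BackupRuleInput rule = BackupRuleInput.builder()
            .ruleName("daily-0500-utc")               // placeholder rule name
            .targetBackupVaultName("my-backup-vault") // placeholder vault name
            .scheduleExpression("cron(0 5 * * ? *)")
            .build();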

" }, "StartWindowMinutes":{ "shape":"WindowMinutes", @@ -1791,7 +1820,7 @@ }, "BackupType":{ "shape":"string", - "documentation":"

Represents the actual backup type selected for a backup job. For example, if a successful WindowsVSS backup was taken, BackupType returns “WindowsVSS”. If BackupType is empty, then it is a regular backup.

" + "documentation":"

Represents the actual backup type selected for a backup job. For example, if a successful WindowsVSS backup was taken, BackupType returns \"WindowsVSS\". If BackupType is empty, then the backup type was a regular backup.

" } } }, @@ -1857,6 +1886,24 @@ } } }, + "DescribeGlobalSettingsInput":{ + "type":"structure", + "members":{ + } + }, + "DescribeGlobalSettingsOutput":{ + "type":"structure", + "members":{ + "GlobalSettings":{ + "shape":"GlobalSettings", + "documentation":"

A list of resources along with the opt-in preferences for the account.

" + }, + "LastUpdateTime":{ + "shape":"timestamp", + "documentation":"

The date and time that the global settings were last updated. This value is in Unix format and Coordinated Universal Time (UTC). The value of LastUpdateTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + } + } + }, "DescribeProtectedResourceInput":{ "type":"structure", "required":["ResourceArn"], @@ -1922,6 +1969,10 @@ "shape":"ARN", "documentation":"

An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" }, + "SourceBackupVaultArn":{ + "shape":"ARN", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies the source vault where the resource was originally backed up; for example, arn:aws:backup:us-east-1:123456789012:vault:BackupVault. If the recovery point is restored to the same AWS account or Region, this value will be null.

" + }, "ResourceArn":{ "shape":"ARN", "documentation":"

An ARN that uniquely identifies a saved resource. The format of the ARN depends on the resource type.

" @@ -2338,6 +2389,13 @@ } } }, + "GlobalSettings":{ + "type":"map", + "key":{"shape":"GlobalSettingsName"}, + "value":{"shape":"GlobalSettingsValue"} + }, + "GlobalSettingsName":{"type":"string"}, + "GlobalSettingsValue":{"type":"string"}, "IAMPolicy":{"type":"string"}, "IAMRoleArn":{"type":"string"}, "InvalidParameterValueException":{ @@ -3057,6 +3115,10 @@ "shape":"ARN", "documentation":"

An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" }, + "SourceBackupVaultArn":{ + "shape":"ARN", + "documentation":"

The backup vault where the recovery point was originally copied from. If the recovery point is restored to the same account, this value will be null.

" + }, "ResourceArn":{ "shape":"ARN", "documentation":"

An ARN that uniquely identifies a resource. The format of the ARN depends on the resource type.

" @@ -3348,7 +3410,7 @@ }, "BackupOptions":{ "shape":"BackupOptions", - "documentation":"

Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs.

Valid value: \"WindowsVSS”:“enabled\". If enabled, creates a VSS Windows backup; otherwise, creates a regular backup.

" + "documentation":"

Specifies the backup option for a selected resource. This option is only available for Windows VSS backup jobs.

Valid values: Set to \"WindowsVSS\":\"enabled\" to enable the WindowsVSS backup option and create a VSS Windows backup. Set to \"WindowsVSS\":\"disabled\" to create a regular backup. The WindowsVSS option is not enabled by default.
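A minimal sketch of passing this option through the AWS SDK for Java v2, assuming the generated StartBackupJobRequest builder follows the usual codegen naming (backupOptions, resourceArn, iamRoleArn); the vault name and ARNs below are placeholders.

import java.util.Map;
import software.amazon.awssdk.services.backup.BackupClient;
import software.amazon.awssdk.services.backup.model.StartBackupJobRequest;
import software.amazon.awssdk.services.backup.model.StartBackupJobResponse;

public class WindowsVssBackupExample {
    public static void main(String[] args) {
        try (BackupClient backup = BackupClient.create()) {
            StartBackupJobResponse response = backup.startBackupJob(StartBackupJobRequest.builder()
                    .backupVaultName("Default")
                    .resourceArn("arn:aws:ec2:us-east-1:123456789012:instance/i-0abcd1234example")
                    .iamRoleArn("arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole")
                    // "WindowsVSS":"enabled" requests a VSS Windows backup; "disabled" (the default) creates a regular backup.
                    .backupOptions(Map.of("WindowsVSS", "enabled"))
                    .build());
            System.out.println("Backup job started: " + response.backupJobId());
        }
    }
}

Omitting backupOptions, or setting \"WindowsVSS\":\"disabled\", results in a regular (non-VSS) backup.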

" } } }, @@ -3428,7 +3490,7 @@ }, "Metadata":{ "shape":"Metadata", - "documentation":"

A set of metadata key-value pairs. Contains information, such as a resource name, required to restore a recovery point.

You can get configuration metadata about a resource at the time it was backed up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the original already exists.

You need to specify specific metadata to restore an Amazon Elastic File System (Amazon EFS) instance:

  • file-system-id: The ID of the Amazon EFS file system that is backed up by AWS Backup. Returned in GetRecoveryPointRestoreMetadata.

  • Encrypted: A Boolean value that, if true, specifies that the file system is encrypted. If KmsKeyId is specified, Encrypted must be set to true.

  • KmsKeyId: Specifies the AWS KMS key that is used to encrypt the restored file system.

  • PerformanceMode: Specifies the throughput mode of the file system.

  • CreationToken: A user-supplied value that ensures the uniqueness (idempotency) of the request.

  • newFileSystem: A Boolean value that, if true, specifies that the recovery point is restored to a new Amazon EFS file system.

" + "documentation":"

A set of metadata key-value pairs. Contains information, such as a resource name, required to restore a recovery point.

You can get configuration metadata about a resource at the time it was backed up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the original already exists.

You need to specify specific metadata to restore an Amazon Elastic File System (Amazon EFS) instance:

  • file-system-id: The ID of the Amazon EFS file system that is backed up by AWS Backup. Returned in GetRecoveryPointRestoreMetadata.

  • Encrypted: A Boolean value that, if true, specifies that the file system is encrypted. If KmsKeyId is specified, Encrypted must be set to true.

  • KmsKeyId: Specifies the AWS KMS key that is used to encrypt the restored file system. You can specify a key from another AWS account, provided that the key is properly shared with your account via AWS KMS.

  • PerformanceMode: Specifies the throughput mode of the file system.

  • CreationToken: A user-supplied value that ensures the uniqueness (idempotency) of the request.

  • newFileSystem: A Boolean value that, if true, specifies that the recovery point is restored to a new Amazon EFS file system.

  • ItemsToRestore: A serialized list of up to five strings, where each string is a file path. Use ItemsToRestore to restore specific files or directories rather than the entire file system. This parameter is optional.
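A hedged sketch of a StartRestoreJob call assembled from these keys with the AWS SDK for Java v2; the recovery point ARN, role ARN, KMS key, and restore paths are placeholders.

import java.util.Map;
import software.amazon.awssdk.services.backup.BackupClient;
import software.amazon.awssdk.services.backup.model.StartRestoreJobRequest;

public class EfsRestoreExample {
    public static void main(String[] args) {
        try (BackupClient backup = BackupClient.create()) {
            backup.startRestoreJob(StartRestoreJobRequest.builder()
                    .recoveryPointArn("arn:aws:backup:us-east-1:123456789012:recovery-point:EXAMPLE")
                    .iamRoleArn("arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole")
                    .metadata(Map.of(
                            "file-system-id", "fs-0123456789abcdef0",   // from GetRecoveryPointRestoreMetadata
                            "Encrypted", "true",
                            "KmsKeyId", "arn:aws:kms:us-east-1:123456789012:key/EXAMPLE",
                            "PerformanceMode", "generalPurpose",
                            "CreationToken", "restore-2020-10-30-001",  // idempotency token
                            "newFileSystem", "true",
                            // Optional: restore only these paths instead of the whole file system.
                            "ItemsToRestore", "[\"/app/config\",\"/app/data\"]"))
                    .build());
        }
    }
}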

" }, "IamRoleArn":{ "shape":"IAMRoleArn", @@ -3568,6 +3630,15 @@ } } }, + "UpdateGlobalSettingsInput":{ + "type":"structure", + "members":{ + "GlobalSettings":{ + "shape":"GlobalSettings", + "documentation":"

A list of resources along with the opt-in preferences for the account.
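For illustration only, a sketch of reading and writing this map with the AWS SDK for Java v2; the setting key shown (isCrossAccountBackupEnabled) is an assumption for the cross-account opt-in, so check DescribeGlobalSettings for the keys your account actually exposes.

import java.util.Map;
import software.amazon.awssdk.services.backup.BackupClient;
import software.amazon.awssdk.services.backup.model.DescribeGlobalSettingsRequest;
import software.amazon.awssdk.services.backup.model.UpdateGlobalSettingsRequest;

public class GlobalSettingsExample {
    public static void main(String[] args) {
        try (BackupClient backup = BackupClient.create()) {
            // Opt the account in to a global setting; the key name is illustrative.
            backup.updateGlobalSettings(UpdateGlobalSettingsRequest.builder()
                    .globalSettings(Map.of("isCrossAccountBackupEnabled", "true"))
                    .build());
            // Read back the current opt-in preferences and their values.
            System.out.println(backup.describeGlobalSettings(DescribeGlobalSettingsRequest.builder().build())
                    .globalSettings());
        }
    }
}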

" + } + } + }, "UpdateRecoveryPointLifecycleInput":{ "type":"structure", "required":[ diff --git a/services/batch/pom.xml b/services/batch/pom.xml index ba34c82fa40f..53099c9eb45e 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/batch/src/main/resources/codegen-resources/service-2.json b/services/batch/src/main/resources/codegen-resources/service-2.json index c4df3ebbef5d..5fd469086c06 100644 --- a/services/batch/src/main/resources/codegen-resources/service-2.json +++ b/services/batch/src/main/resources/codegen-resources/service-2.json @@ -24,7 +24,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are cancelled. Jobs that have progressed to STARTING or RUNNING are not cancelled (but the API operation still succeeds, even if no job is cancelled); these jobs must be terminated with the TerminateJob operation.

" + "documentation":"

Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING are not canceled (but the API operation still succeeds, even if no job is canceled); these jobs must be terminated with the TerminateJob operation.

" }, "CreateComputeEnvironment":{ "name":"CreateComputeEnvironment", @@ -38,7 +38,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Creates an AWS Batch compute environment. You can create MANAGED or UNMANAGED compute environments.

In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. You can choose to use Amazon EC2 On-Demand Instances or Spot Instances in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is below a specified percentage of the On-Demand price.

Multi-node parallel jobs are not supported on Spot Instances.

In an unmanaged compute environment, you can manage your own compute resources. This provides more compute resource configuration options, such as using a custom AMI, but you must ensure that your AMI meets the Amazon ECS container instance AMI specification. For more information, see Container Instance AMIs in the Amazon Elastic Container Service Developer Guide. After you have created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that is associated with it. Then, manually launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS Container Instance in the Amazon Elastic Container Service Developer Guide.

AWS Batch does not upgrade the AMIs in a compute environment after it is created (for example, when a newer version of the Amazon ECS-optimized AMI is available). You are responsible for the management of the guest operating system (including updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your AWS Batch jobs:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the old compute environment from your job queue.

  4. Delete the old compute environment.

" + "documentation":"

Creates an AWS Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or AWS Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. You can choose either to use EC2 On-Demand Instances and EC2 Spot Instances, or to use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is below a specified percentage of the On-Demand price.

Multi-node parallel jobs are not supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use a custom AMI. However, you need to verify that your AMI meets the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you have created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that is associated with it. Then, manually launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

AWS Batch doesn't upgrade the AMIs in a compute environment after it's created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS-optimized AMI is available. Therefore, you're responsible for the management of the guest operating system (including updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your AWS Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.
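Because only type, maxvCpus, and subnets are now required in computeResources, a Fargate environment can be created with a fairly small request. A hedged AWS SDK for Java v2 sketch, assuming the standard codegen builder names; the subnet, security group, and role ARN are placeholders.

import software.amazon.awssdk.services.batch.BatchClient;
import software.amazon.awssdk.services.batch.model.ComputeResource;
import software.amazon.awssdk.services.batch.model.CreateComputeEnvironmentRequest;

public class FargateComputeEnvironmentExample {
    public static void main(String[] args) {
        try (BatchClient batch = BatchClient.create()) {
            batch.createComputeEnvironment(CreateComputeEnvironmentRequest.builder()
                    .computeEnvironmentName("fargate-ce")
                    .type("MANAGED")                          // Fargate environments are always MANAGED
                    .computeResources(ComputeResource.builder()
                            .type("FARGATE")                  // or "FARGATE_SPOT" for Spot capacity
                            .maxvCpus(16)
                            .subnets("subnet-0123456789abcdef0")
                            .securityGroupIds("sg-0123456789abcdef0")
                            .build())
                    .serviceRole("arn:aws:iam::123456789012:role/AWSBatchServiceRole")
                    .build());
        }
    }
}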

" }, "CreateJobQueue":{ "name":"CreateJobQueue", @@ -66,7 +66,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deletes an AWS Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation.

" + "documentation":"

Deletes an AWS Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. For compute environments that use AWS Fargate resources, you must terminate all active jobs on the compute environment before deleting it. If this isn't done, the compute environment will end up in an invalid state.

" }, "DeleteJobQueue":{ "name":"DeleteJobQueue", @@ -80,7 +80,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deletes the specified job queue. You must first disable submissions for a queue with the UpdateJobQueue operation. All jobs in the queue are terminated when you delete a job queue.

It is not necessary to disassociate compute environments from a queue before submitting a DeleteJobQueue request.

" + "documentation":"

Deletes the specified job queue. You must first disable submissions for a queue with the UpdateJobQueue operation. All jobs in the queue are terminated when you delete a job queue.

It's not necessary to disassociate compute environments from a queue before submitting a DeleteJobQueue request.

" }, "DeregisterJobDefinition":{ "name":"DeregisterJobDefinition", @@ -94,7 +94,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Deregisters an AWS Batch job definition. Job definitions will be permanently deleted after 180 days.

" + "documentation":"

Deregisters an AWS Batch job definition. Job definitions are permanently deleted after 180 days.

" }, "DescribeComputeEnvironments":{ "name":"DescribeComputeEnvironments", @@ -108,7 +108,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Describes one or more of your compute environments.

If you are using an unmanaged compute environment, you can use the DescribeComputeEnvironment operation to determine the ecsClusterArn that you should launch your Amazon ECS container instances into.

" + "documentation":"

Describes one or more of your compute environments.

If you're using an unmanaged compute environment, you can use the DescribeComputeEnvironment operation to determine the ecsClusterArn that you should launch your Amazon ECS container instances into.
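A small sketch of looking up that ecsClusterArn with the AWS SDK for Java v2; the environment name is a placeholder.

import software.amazon.awssdk.services.batch.BatchClient;
import software.amazon.awssdk.services.batch.model.DescribeComputeEnvironmentsRequest;

public class FindEcsClusterArnExample {
    public static void main(String[] args) {
        try (BatchClient batch = BatchClient.create()) {
            batch.describeComputeEnvironments(DescribeComputeEnvironmentsRequest.builder()
                            .computeEnvironments("my-unmanaged-ce")
                            .build())
                    .computeEnvironments()
                    // Print the ECS cluster backing each unmanaged environment.
                    .forEach(ce -> System.out.println(ce.computeEnvironmentName() + " -> " + ce.ecsClusterArn()));
        }
    }
}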

" }, "DescribeJobDefinitions":{ "name":"DescribeJobDefinitions", @@ -164,7 +164,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Returns a list of AWS Batch jobs.

You must specify only one of the following:

  • a job queue ID to return a list of jobs in that job queue

  • a multi-node parallel job ID to return a list of that job's nodes

  • an array job ID to return a list of that job's children

You can filter the results by job status with the jobStatus parameter. If you do not specify a status, only RUNNING jobs are returned.

" + "documentation":"

Returns a list of AWS Batch jobs.

You must specify only one of the following items:

  • A job queue ID to return a list of jobs in that job queue

  • A multi-node parallel job ID to return a list of that job's nodes

  • An array job ID to return a list of that job's children

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -178,7 +178,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

List the tags for an AWS Batch resource. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" + "documentation":"

Lists the tags for an AWS Batch resource. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" }, "RegisterJobDefinition":{ "name":"RegisterJobDefinition", @@ -206,7 +206,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Submits an AWS Batch job from a job definition. Parameters specified during SubmitJob override parameters defined in the job definition.

" + "documentation":"

Submits an AWS Batch job from a job definition. Parameters specified during SubmitJob override parameters defined in the job definition.

Jobs run on Fargate resources don't run for more than 14 days. After 14 days, the Fargate resources might no longer be available and the job is terminated.

" }, "TagResource":{ "name":"TagResource", @@ -220,7 +220,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" + "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

" }, "TerminateJob":{ "name":"TerminateJob", @@ -334,6 +334,13 @@ }, "documentation":"

An object representing the array properties of a job.

" }, + "AssignPublicIp":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "AttemptContainerDetail":{ "type":"structure", "members":{ @@ -428,7 +435,9 @@ "type":"string", "enum":[ "EC2", - "SPOT" + "SPOT", + "FARGATE", + "FARGATE_SPOT" ] }, "CancelJobRequest":{ @@ -458,7 +467,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permissions to use the action or resource, or specifying an identifier that is not valid.

", + "documentation":"

These errors are usually caused by a client action, such as using an action or resource on behalf of a user that doesn't have permissions to use the action or resource, or specifying an identifier that isn't valid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -488,11 +497,11 @@ }, "type":{ "shape":"CEType", - "documentation":"

The type of the compute environment.

" + "documentation":"

The type of the compute environment: MANAGED or UNMANAGED. For more information, see Compute Environments in the AWS Batch User Guide.

" }, "state":{ "shape":"CEState", - "documentation":"

The state of the compute environment. The valid values are ENABLED or DISABLED.

If the state is ENABLED, then the AWS Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the AWS Batch scheduler does not attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state do not scale out. However, they scale in to minvCpus value after instances become idle.

" + "documentation":"

The state of the compute environment. The valid values are ENABLED or DISABLED.

If the state is ENABLED, then the AWS Batch scheduler can attempt to place jobs from an associated job queue on the compute resources within the environment. If the compute environment is managed, then it can scale its instances out or in automatically, based on the job queue demand.

If the state is DISABLED, then the AWS Batch scheduler doesn't attempt to place jobs within the environment. Jobs in a STARTING or RUNNING state continue to progress normally. Managed compute environments in the DISABLED state do not scale out. However, they scale in to the minvCpus value after instances become idle.

" }, "status":{ "shape":"CEStatus", @@ -526,7 +535,7 @@ "members":{ "order":{ "shape":"Integer", - "documentation":"

The order of the compute environment.

" + "documentation":"

The order of the compute environment. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower order integer value is tried for job placement first.

" }, "computeEnvironment":{ "shape":"String", @@ -543,76 +552,79 @@ "type":"structure", "required":[ "type", - "minvCpus", "maxvCpus", - "instanceTypes", - "subnets", - "instanceRole" + "subnets" ], "members":{ "type":{ "shape":"CRType", - "documentation":"

The type of compute environment: EC2 or SPOT.

" + "documentation":"

The type of compute environment: EC2, SPOT, FARGATE, or FARGATE_SPOT. For more information, see Compute Environments in the AWS Batch User Guide.

" }, "allocationStrategy":{ "shape":"CRAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource in case not enough instances of the best fitting instance type can be allocated. This could be due to availability of the instance type in the region or Amazon EC2 service limits. If this is not specified, the default is BEST_FIT, which will use only the best fitting instance type, waiting for additional capacity if it's not available. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT then the Spot Fleet IAM Role must be specified. BEST_FIT_PROGRESSIVE will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per vCPU. SPOT_CAPACITY_OPTIMIZED is only available for Spot Instance compute resources and will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. For more information, see Allocation Strategies in the AWS Batch User Guide.

" + "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation Strategies in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

BEST_FIT (default)

AWS Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, AWS Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is hitting Amazon EC2 service limits, then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT, then the Spot Fleet IAM Role must be specified.

BEST_FIT_PROGRESSIVE

AWS Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, AWS Batch will select new instance types.

SPOT_CAPACITY_OPTIMIZED

AWS Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance.

" }, "minvCpus":{ "shape":"Integer", - "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

" + "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

" + "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch may need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch will never go above maxvCpus by more than a single instance (for example, no more than a single instance from among those specified in your compute environment).

" }, "desiredvCpus":{ "shape":"Integer", - "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment.

" + "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment. AWS Batch modifies this value between the minimum and maximum values, based on job queue demand.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "instanceTypes":{ "shape":"StringList", - "documentation":"

The instances types that may be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to pick instance types (from the C, M, and R instance families) on the fly that match the demand of your job queues.

" + "documentation":"

The instances types that may be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C, M, and R instance families) on the fly that match the demand of your job queues.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

" }, "imageId":{ "shape":"String", - "documentation":"

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.

" + "documentation":"

The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the imageIdOverride member of the Ec2Configuration structure.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

The AMI that you choose for a compute environment must match the architecture of the instance types that you intend to use for that compute environment. For example, if your compute environment uses A1 instance types, the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon ECS-optimized Amazon Linux 2 AMI in the Amazon Elastic Container Service Developer Guide.

", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use ec2Configuration[].imageIdOverride instead." }, "subnets":{ "shape":"StringList", - "documentation":"

The VPC subnets into which the compute resources are launched. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" + "documentation":"

The VPC subnets into which the compute resources are launched. These subnets must be within the same VPC. This parameter is required for jobs running on Fargate resources, where it can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

" }, "securityGroupIds":{ "shape":"StringList", - "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds will be used.

" + "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs running on Fargate resources and must contain at least one security group. (Fargate does not support launch templates.) If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds will be used.

" }, "ec2KeyPair":{ "shape":"String", - "documentation":"

The Amazon EC2 key pair that is used for instances launched in the compute environment.

" + "documentation":"

The Amazon EC2 key pair that is used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "instanceRole":{ "shape":"String", - "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS Instance Role in the AWS Batch User Guide.

" + "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS Instance Role in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "tags":{ "shape":"TagsMap", - "documentation":"

Key-value pair tags to be applied to resources that are launched in the compute environment. For AWS Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value—for example, { \"Name\": \"AWS Batch Instance - C4OnDemand\" }. These tags can not be updated or removed after the compute environment has been created; any changes require creating a new compute environment and removing the old compute environment. These tags are not seen when using the AWS Batch ListTagsForResource API operation.

" + "documentation":"

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value, for example, { \"Name\": \"AWS Batch Instance - C4OnDemand\" }. This is helpful for recognizing your AWS Batch instances in the Amazon EC2 console. These tags cannot be updated or removed after the compute environment has been created; any changes require creating a new compute environment and removing the old compute environment. These tags are not seen when using the AWS Batch ListTagsForResource API operation.

" }, "placementGroup":{ "shape":"String", - "documentation":"

The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide for Linux Instances.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "bidPercentage":{ "shape":"Integer", - "documentation":"

The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be below 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price.

" + "documentation":"

The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be below 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "spotIamFleetRole":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. This role is required if the allocation strategy set to BEST_FIT or if the allocation strategy is not specified. For more information, see Amazon EC2 Spot Fleet Role in the AWS Batch User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment. This role is required if the allocation strategy is set to BEST_FIT or if the allocation strategy isn't specified. For more information, see Amazon EC2 Spot Fleet Role in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended AmazonEC2SpotFleetRole managed policy doesn't have the required permissions to tag Spot Instances. For more information, see Spot Instances not tagged on creation in the AWS Batch User Guide.

" }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch Template Support in the AWS Batch User Guide.

" + "documentation":"

The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch Template Support in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + }, + "ec2Configuration":{ + "shape":"Ec2ConfigurationList", + "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL1.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" } }, "documentation":"

An object representing an AWS Batch compute resource.

" @@ -622,7 +634,7 @@ "members":{ "minvCpus":{ "shape":"Integer", - "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain.

" + "documentation":"

The minimum number of Amazon EC2 vCPUs that an environment should maintain.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" }, "maxvCpus":{ "shape":"Integer", @@ -630,7 +642,15 @@ }, "desiredvCpus":{ "shape":"Integer", - "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment.

" + "documentation":"

The desired number of Amazon EC2 vCPUS in the compute environment.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + }, + "subnets":{ + "shape":"StringList", + "documentation":"

The VPC subnets that the compute resources are launched into. This parameter is required for jobs running on Fargate compute resources, where it can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide. This can't be specified for EC2 compute resources. Providing an empty list will be handled as if this parameter wasn't specified and no change is made.

" + }, + "securityGroupIds":{ + "shape":"StringList", + "documentation":"

The Amazon EC2 security groups associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. This can't be specified for EC2 compute resources. Providing an empty list is handled as if this parameter wasn't specified and no change is made.

" } }, "documentation":"

An object representing the attributes of a compute environment that can be updated.
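A hedged sketch of sending these updatable attributes with the AWS SDK for Java v2; for a Fargate environment the subnets and security groups can be replaced as shown (identifiers are placeholders).

import software.amazon.awssdk.services.batch.BatchClient;
import software.amazon.awssdk.services.batch.model.ComputeResourceUpdate;
import software.amazon.awssdk.services.batch.model.UpdateComputeEnvironmentRequest;

public class UpdateFargateEnvironmentExample {
    public static void main(String[] args) {
        try (BatchClient batch = BatchClient.create()) {
            batch.updateComputeEnvironment(UpdateComputeEnvironmentRequest.builder()
                    .computeEnvironment("fargate-ce")
                    .computeResources(ComputeResourceUpdate.builder()
                            .maxvCpus(32)                               // raise the vCPU ceiling
                            .subnets("subnet-0123456789abcdef0")        // Fargate only; omit for EC2 environments
                            .securityGroupIds("sg-0123456789abcdef0")
                            .build())
                    .build());
        }
    }
}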

" @@ -644,11 +664,11 @@ }, "vcpus":{ "shape":"Integer", - "documentation":"

The number of VCPUs allocated for the job. This is a required parameter.

" + "documentation":"

The number of vCPUs reserved for the container. Jobs running on EC2 resources can specify the vCPU requirement for the job using resourceRequirements, but the vCPU requirements can't be specified both here and in the resourceRequirements object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs running on Fargate resources. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

" }, "memory":{ "shape":"Integer", - "documentation":"

The number of MiB of memory reserved for the job. This is a required parameter.

" + "documentation":"

For jobs run on EC2 resources that didn't specify memory requirements using resourceRequirements, the number of MiB of memory reserved for the job. For other jobs, including all jobs run on Fargate resources, see resourceRequirements.

" }, "command":{ "shape":"StringList", @@ -660,7 +680,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role.

" + "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

" }, "volumes":{ "shape":"Volumes", @@ -676,19 +696,19 @@ }, "readonlyRootFilesystem":{ "shape":"Boolean", - "documentation":"

When this parameter is true, the container is given read-only access to its root file system.

" + "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

" }, "ulimits":{ "shape":"Ulimits", - "documentation":"

A list of ulimit values to set in the container.

" + "documentation":"

A list of ulimit values to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

This parameter isn't applicable to jobs running on Fargate resources.

" }, "privileged":{ "shape":"Boolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user).

" + "documentation":"

When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user). The default value is false.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided, or specified as false.

" }, "user":{ "shape":"String", - "documentation":"

The user name to use inside the container.

" + "documentation":"

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

" }, "exitCode":{ "shape":"Integer", @@ -700,7 +720,7 @@ }, "containerInstanceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the container instance on which the container is running.

" + "documentation":"

The Amazon Resource Name (ARN) of the container instance that the container is running on.

" }, "taskArn":{ "shape":"String", @@ -712,7 +732,7 @@ }, "instanceType":{ "shape":"String", - "documentation":"

The instance type of the underlying host infrastructure of a multi-node parallel job.

" + "documentation":"

The instance type of the underlying host infrastructure of a multi-node parallel job.

This parameter isn't applicable to jobs running on Fargate resources.

" }, "networkInterfaces":{ "shape":"NetworkInterfaceList", @@ -720,7 +740,7 @@ }, "resourceRequirements":{ "shape":"ResourceRequirements", - "documentation":"

The type and amount of a resource to assign to a container. Currently, the only supported resource is GPU.

" + "documentation":"

The type and amount of resources to assign to a container. The supported resources include GPU, MEMORY, and VCPU.

" }, "linuxParameters":{ "shape":"LinuxParameters", @@ -728,11 +748,19 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Alternatively, it can be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "secrets":{ "shape":"SecretList", - "documentation":"

The secrets to pass to the container. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The secrets to pass to the container. For more information, see Specifying sensitive data in the AWS Batch User Guide.

" + }, + "networkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

The network configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + }, + "fargatePlatformConfiguration":{ + "shape":"FargatePlatformConfiguration", + "documentation":"

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" } }, "documentation":"

An object representing the details of a container that is part of a job.

" @@ -742,11 +770,15 @@ "members":{ "vcpus":{ "shape":"Integer", - "documentation":"

The number of vCPUs to reserve for the container. This value overrides the value set in the job definition.

" + "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources; use resourceRequirements instead. For jobs run on EC2 resources, this is the number of vCPUs to reserve for the container. This value overrides the value set in the job definition. Jobs run on EC2 resources can specify the vCPU requirement using resourceRequirements, but the vCPU requirement can't be specified both here and in resourceRequirements. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

The number of MiB of memory reserved for the job. This value overrides the value set in the job definition.

" + "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources; use resourceRequirements instead. For jobs run on EC2 resources, this is the number of MiB of memory reserved for the job. This value overrides the value set in the job definition.

", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "command":{ "shape":"StringList", @@ -754,7 +786,7 @@ }, "instanceType":{ "shape":"String", - "documentation":"

The instance type to use for a multi-node parallel job. This parameter is not valid for single-node container jobs.

" + "documentation":"

The instance type to use for a multi-node parallel job.

This parameter isn't applicable to single-node container jobs or for jobs running on Fargate resources and shouldn't be provided.

" }, "environment":{ "shape":"EnvironmentVariables", @@ -762,7 +794,7 @@ }, "resourceRequirements":{ "shape":"ResourceRequirements", - "documentation":"

The type and amount of a resource to assign to a container. This value overrides the value set in the job definition. Currently, the only supported resource is GPU.

" + "documentation":"

The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, and VCPU.
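A hedged sketch of overriding vCPU and memory through resourceRequirements rather than the deprecated vcpus and memory fields, using the AWS SDK for Java v2; the job name, queue, and definition are placeholders, and values are passed as strings.

import software.amazon.awssdk.services.batch.BatchClient;
import software.amazon.awssdk.services.batch.model.ContainerOverrides;
import software.amazon.awssdk.services.batch.model.ResourceRequirement;
import software.amazon.awssdk.services.batch.model.SubmitJobRequest;

public class SubmitJobOverrideExample {
    public static void main(String[] args) {
        try (BatchClient batch = BatchClient.create()) {
            batch.submitJob(SubmitJobRequest.builder()
                    .jobName("resource-override-demo")
                    .jobQueue("my-job-queue")
                    .jobDefinition("my-job-definition:1")
                    .containerOverrides(ContainerOverrides.builder()
                            // Override the job definition's resource settings for this submission only.
                            .resourceRequirements(
                                    ResourceRequirement.builder().type("VCPU").value("1").build(),
                                    ResourceRequirement.builder().type("MEMORY").value("2048").build())
                            .build())
                    .build());
        }
    }
}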

" } }, "documentation":"

The overrides that should be sent to a container.

" @@ -772,15 +804,19 @@ "members":{ "image":{ "shape":"String", - "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

  • Images in Amazon ECR repositories use the full registry and repository URI (for example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>).

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" + "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources.

  • Images in Amazon ECR repositories use the full registry and repository URI (for example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>).

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" }, "vcpus":{ "shape":"Integer", - "documentation":"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places for multi-node parallel (MNP) jobs; it must be specified for each node at least once.

" + "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources; see resourceRequirements. The number of vCPUs reserved for the container. Jobs running on EC2 resources can specify the vCPU requirement for the job using resourceRequirements, but the vCPU requirements can't be specified both here and in the resourceRequirements structure. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs; it must be specified for each node at least once.

If you are trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

" + "documentation":"

This parameter is deprecated and not supported for jobs run on Fargate resources; use resourceRequirements instead. Jobs run on EC2 resources can specify the memory requirement using the resourceRequirements structure. The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places; it must be specified for each node at least once.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

", + "deprecated":true, + "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "command":{ "shape":"StringList", @@ -788,11 +824,11 @@ }, "jobRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role.

" + "documentation":"

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. Jobs running on Fargate resources must provide an execution role. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

" }, "volumes":{ "shape":"Volumes", @@ -800,7 +836,7 @@ }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the AWS Batch service.

" + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We don't recommend using plaintext environment variables for sensitive information, such as credential data.

Environment variables must not start with AWS_BATCH; this naming convention is reserved for variables that are set by the AWS Batch service.

" }, "mountPoints":{ "shape":"MountPoints", @@ -812,11 +848,11 @@ }, "privileged":{ "shape":"Boolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

" + "documentation":"

When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run. The default value is false.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided, or should be specified as false.

" }, "ulimits":{ "shape":"Ulimits", - "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

" + "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" }, "user":{ "shape":"String", @@ -824,11 +860,11 @@ }, "instanceType":{ "shape":"String", - "documentation":"

The instance type to use for a multi-node parallel job. Currently all node groups in a multi-node parallel job must use the same instance type. This parameter is not valid for single-node container jobs.

" + "documentation":"

The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.

This parameter isn't applicable to single-node container jobs or for jobs running on Fargate resources and shouldn't be provided.

" }, "resourceRequirements":{ "shape":"ResourceRequirements", - "documentation":"

The type and amount of a resource to assign to a container. Currently, the only supported resource is GPU.

" + "documentation":"

The type and amount of resources to assign to a container. The supported resources include GPU, MEMORY, and VCPU.

" }, "linuxParameters":{ "shape":"LinuxParameters", @@ -836,11 +872,19 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "secrets":{ "shape":"SecretList", - "documentation":"

The secrets for the container. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The secrets for the container. For more information, see Specifying sensitive data in the AWS Batch User Guide.

" + }, + "networkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

The network configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + }, + "fargatePlatformConfiguration":{ + "shape":"FargatePlatformConfiguration", + "documentation":"

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" } }, "documentation":"

Container properties are used in job definitions to describe the container that is launched as part of a job.

" @@ -873,7 +917,7 @@ }, "type":{ "shape":"CEType", - "documentation":"

The type of the compute environment. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

The type of the compute environment: MANAGED or UNMANAGED. For more information, see Compute Environments in the AWS Batch User Guide.

" }, "state":{ "shape":"CEState", @@ -881,15 +925,15 @@ }, "computeResources":{ "shape":"ComputeResource", - "documentation":"

Details of the compute resources managed by the compute environment. This parameter is required for managed compute environments. For more information, see Compute Environments in the AWS Batch User Guide.

" + "documentation":"

Details about the compute resources managed by the compute environment. This parameter is required for managed compute environments. For more information, see Compute Environments in the AWS Batch User Guide.

" }, "serviceRole":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN may contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN may contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags that you apply to the compute environment to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General Reference.

These tags can be updated or removed using the TagResource and UntagResource API operations. These tags do not propagate to the underlying compute resources.

" + "documentation":"

The tags that you apply to the compute environment to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General Reference.

These tags can be updated or removed using the TagResource and UntagResource API operations. These tags don't propagate to the underlying compute resources.

" } } }, @@ -920,15 +964,15 @@ }, "state":{ "shape":"JQState", - "documentation":"

The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, new jobs cannot be added to the queue, but jobs already in the queue can finish.

" + "documentation":"

The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, new jobs can't be added to the queue, but jobs already in the queue can finish.

" }, "priority":{ "shape":"Integer", - "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1.

" + "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1.

" }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", - "documentation":"

The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment should execute a given job. Compute environments must be in the VALID state before you can associate them with a job queue. You can associate up to three compute environments with a job queue.

" + "documentation":"

The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment should run a specific job. Compute environments must be in the VALID state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

" }, "tags":{ "shape":"TagrisTagsMap", @@ -1007,7 +1051,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of cluster results returned by DescribeComputeEnvironments in paginated output. When this parameter is used, DescribeComputeEnvironments only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeComputeEnvironments request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then DescribeComputeEnvironments returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of cluster results returned by DescribeComputeEnvironments in paginated output. When this parameter is used, DescribeComputeEnvironments only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeComputeEnvironments request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeComputeEnvironments returns up to 100 results and a nextToken value if applicable.

" }, "nextToken":{ "shape":"String", @@ -1037,7 +1081,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then DescribeJobDefinitions returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobDefinitions returns up to 100 results and a nextToken value if applicable.

" }, "jobDefinitionName":{ "shape":"String", @@ -1045,7 +1089,7 @@ }, "status":{ "shape":"String", - "documentation":"

The status with which to filter job definitions.

" + "documentation":"

The status used to filter job definitions.

" }, "nextToken":{ "shape":"String", @@ -1075,7 +1119,7 @@ }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results returned by DescribeJobQueues in paginated output. When this parameter is used, DescribeJobQueues only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobQueues request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then DescribeJobQueues returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of results returned by DescribeJobQueues in paginated output. When this parameter is used, DescribeJobQueues only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobQueues request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobQueues returns up to 100 results and a nextToken value if applicable.

" }, "nextToken":{ "shape":"String", @@ -1125,14 +1169,14 @@ }, "containerPath":{ "shape":"String", - "documentation":"

The path inside the container at which to expose the host device. By default the hostPath value is used.

" + "documentation":"

The path inside the container used to expose the host device. By default the hostPath value is used.

" }, "permissions":{ "shape":"DeviceCgroupPermissions", "documentation":"

The explicit permissions to provide to the container for the device. By default, the container has permissions for read, write, and mknod for the device.

" } }, - "documentation":"

An object representing a container instance host device.

" + "documentation":"

An object representing a container instance host device.

This object isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" }, "DeviceCgroupPermission":{ "type":"string", @@ -1150,19 +1194,85 @@ "type":"list", "member":{"shape":"Device"} }, + "Ec2Configuration":{ + "type":"structure", + "required":["imageType"], + "members":{ + "imageType":{ + "shape":"ImageType", + "documentation":"

The image type to match with the instance type to select an AMI. If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized AMI is used.

ECS_AL2

Amazon Linux 2 - Default for all AWS Graviton-based instance families (for example, C6g, M6g, R6g, and T4g) and can be used for all non-GPU instance types.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU) - Default for all GPU instance families (for example P4 and G4) and can be used for all non-AWS Graviton-based instance types.

ECS_AL1

Amazon Linux - Default for all non-GPU, non-AWS-Graviton instance families. Amazon Linux is reaching the end of its standard support. For more information, see Amazon Linux AMI.

" + }, + "imageIdOverride":{ + "shape":"ImageIdOverride", + "documentation":"

The AMI ID used for instances launched in the compute environment that match the image type. This setting overrides the imageId set in the computeResource object.

" + } + }, + "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If the Ec2Configuration isn't specified, the default is ECS_AL1.

This object isn't applicable to jobs running on Fargate resources.

" + }, + "Ec2ConfigurationList":{ + "type":"list", + "member":{"shape":"Ec2Configuration"} + }, "EnvironmentVariables":{ "type":"list", "member":{"shape":"KeyValuePair"} }, + "EvaluateOnExit":{ + "type":"structure", + "required":["action"], + "members":{ + "onStatusReason":{ + "shape":"String", + "documentation":"

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters long, can contain letters, numbers, periods (.), colons (:), and white space (spaces, tabs), and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + }, + "onReason":{ + "shape":"String", + "documentation":"

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters long, can contain letters, numbers, periods (.), colons (:), and white space (spaces, tabs), and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + }, + "onExitCode":{ + "shape":"String", + "documentation":"

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

" + }, + "action":{ + "shape":"RetryAction", + "documentation":"

Specifies the action to take if all of the specified conditions (onStatusReason, onReason, and onExitCode) are met. The values are not case sensitive.

" + } + }, + "documentation":"

Specifies a set of conditions to be met, and an action to take (RETRY or EXIT) if all conditions are met.

" + }, + "EvaluateOnExitList":{ + "type":"list", + "member":{"shape":"EvaluateOnExit"} + }, + "FargatePlatformConfiguration":{ + "type":"structure", + "members":{ + "platformVersion":{ + "shape":"String", + "documentation":"

The AWS Fargate platform version on which the jobs are running. A platform version is specified only for jobs running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This will use a recent, approved version of the AWS Fargate platform for compute resources. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" + } + }, + "documentation":"

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + }, "Host":{ "type":"structure", "members":{ "sourcePath":{ "shape":"String", - "documentation":"

The path on the host container instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

" + "documentation":"

The path on the host container instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" } }, - "documentation":"

Determine whether your data volume persists on the host container instance and where it is stored. If this parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running.

" + "documentation":"

Determine whether your data volume persists on the host container instance and where it is stored. If this parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data isn't guaranteed to persist after the containers associated with it stop running.

" + }, + "ImageIdOverride":{ + "type":"string", + "max":256, + "min":1 + }, + "ImageType":{ + "type":"string", + "max":256, + "min":1 }, "Integer":{"type":"integer"}, "JQState":{ @@ -1210,7 +1320,7 @@ }, "type":{ "shape":"String", - "documentation":"

The type of job definition.

" + "documentation":"

The type of job definition. If the job is run on Fargate resources, then multinode isn't supported. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the AWS Batch User Guide.

" }, "parameters":{ "shape":"ParametersMap", @@ -1226,15 +1336,23 @@ }, "timeout":{ "shape":"JobTimeout", - "documentation":"

The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout duration after which AWS Batch terminates your jobs if they have not finished.

" + "documentation":"

The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout duration after which AWS Batch terminates your jobs if they haven't finished.

" }, "nodeProperties":{ "shape":"NodeProperties", - "documentation":"

An object with various properties specific to multi-node parallel jobs.

" + "documentation":"

An object with various properties specific to multi-node parallel jobs.

If the job runs on Fargate resources, then you must not specify nodeProperties; use containerProperties instead.

" }, "tags":{ "shape":"TagrisTagsMap", "documentation":"

The tags applied to the job definition.

" + }, + "propagateTags":{ + "shape":"Boolean", + "documentation":"

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definition tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

" + }, + "platformCapabilities":{ + "shape":"PlatformCapabilityList", + "documentation":"

The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. Jobs running on Fargate resources specify FARGATE.

" } }, "documentation":"

An object representing an AWS Batch job definition.

" @@ -1293,7 +1411,7 @@ }, "jobQueue":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the job queue with which the job is associated.

" + "documentation":"

The Amazon Resource Name (ARN) of the job queue that the job is associated with.

" }, "status":{ "shape":"JobStatus", @@ -1317,7 +1435,7 @@ }, "startedAt":{ "shape":"Long", - "documentation":"

The Unix timestamp (in milliseconds) for when the job was started (when the job transitioned from the STARTING state to the RUNNING state). This parameter is not provided for child jobs of array jobs or multi-node parallel jobs.

" + "documentation":"

The Unix timestamp (in milliseconds) for when the job was started (when the job transitioned from the STARTING state to the RUNNING state). This parameter isn't provided for child jobs of array jobs or multi-node parallel jobs.

" }, "stoppedAt":{ "shape":"Long", @@ -1325,7 +1443,7 @@ }, "dependsOn":{ "shape":"JobDependencyList", - "documentation":"

A list of job IDs on which this job depends.

" + "documentation":"

A list of job IDs that this job depends on.

" }, "jobDefinition":{ "shape":"String", @@ -1337,7 +1455,7 @@ }, "container":{ "shape":"ContainerDetail", - "documentation":"

An object representing the details of the container that is associated with the job.

" + "documentation":"

An object representing the details of the container that's associated with the job.

" }, "nodeDetails":{ "shape":"NodeDetails", @@ -1345,7 +1463,7 @@ }, "nodeProperties":{ "shape":"NodeProperties", - "documentation":"

An object representing the node properties of a multi-node parallel job.

" + "documentation":"

An object representing the node properties of a multi-node parallel job.

This isn't applicable to jobs running on Fargate resources.

" }, "arrayProperties":{ "shape":"ArrayPropertiesDetail", @@ -1358,6 +1476,14 @@ "tags":{ "shape":"TagrisTagsMap", "documentation":"

The tags applied to the job.

" + }, + "propagateTags":{ + "shape":"Boolean", + "documentation":"

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definition tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

" + }, + "platformCapabilities":{ + "shape":"PlatformCapabilityList", + "documentation":"

The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. Jobs running on Fargate resources specify FARGATE.

" } }, "documentation":"

An object representing an AWS Batch job.

" @@ -1386,7 +1512,7 @@ }, "state":{ "shape":"JQState", - "documentation":"

Describes the ability of the queue to accept new jobs. If the job queue state is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, new jobs cannot be added to the queue, but jobs already in the queue can finish.

" + "documentation":"

Describes the ability of the queue to accept new jobs. If the job queue state is ENABLED, it's able to accept jobs. If the job queue state is DISABLED, new jobs can't be added to the queue, but jobs already in the queue can finish.

" }, "status":{ "shape":"JQStatus", @@ -1402,7 +1528,7 @@ }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", - "documentation":"

The compute environments that are attached to the job queue and the order in which job placement is preferred. Compute environments are selected for job placement in ascending order.

" + "documentation":"

The compute environments that are attached to the job queue and the order in which job placement is preferred. Compute environments are selected for job placement in ascending order.

" }, "tags":{ "shape":"TagrisTagsMap", @@ -1476,7 +1602,7 @@ }, "nodeProperties":{ "shape":"NodePropertiesSummary", - "documentation":"

The node properties for a single node in a job summary list.

" + "documentation":"

The node properties for a single node in a job summary list.

This isn't applicable to jobs running on Fargate resources.

" } }, "documentation":"

An object representing summary details of a job.

" @@ -1490,7 +1616,7 @@ "members":{ "attemptDurationSeconds":{ "shape":"Integer", - "documentation":"

The time duration in seconds (measured from the job attempt's startedAt timestamp) after which AWS Batch terminates your jobs if they have not finished.

" + "documentation":"

The time duration in seconds (measured from the job attempt's startedAt timestamp) after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.

" } }, "documentation":"

An object representing a job timeout configuration.

" @@ -1525,14 +1651,14 @@ "documentation":"

The version number of the launch template, $Latest, or $Default.

If the value is $Latest, the latest version of the launch template is used. If the value is $Default, the default version of the launch template is used.

Default: $Default.

" } }, - "documentation":"

An object representing a launch template associated with a compute resource. You must specify either the launch template ID or launch template name in the request, but not both.

" + "documentation":"

An object representing a launch template associated with a compute resource. You must specify either the launch template ID or launch template name in the request, but not both.

If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

This object isn't applicable to jobs running on Fargate resources.

" }, "LinuxParameters":{ "type":"structure", "members":{ "devices":{ "shape":"DevicesList", - "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

" + "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" }, "initProcessEnabled":{ "shape":"Boolean", @@ -1540,19 +1666,19 @@ }, "sharedMemorySize":{ "shape":"Integer", - "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

" + "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" }, "tmpfs":{ "shape":"TmpfsList", - "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

" + "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" }, "maxSwap":{ "shape":"Integer", - "documentation":"

The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the --memory-swap option to docker run where the value would be the sum of the container memory plus the maxSwap value.

If a maxSwap value of 0 is specified, the container will not use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container will use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

" + "documentation":"

The total amount of swap memory (in MiB) a container can use. This parameter is translated to the --memory-swap option to docker run where the value is the sum of the container memory plus the maxSwap value. For more information, see --memory-swap details in the Docker documentation.

If a maxSwap value of 0 is specified, the container doesn't use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container doesn't use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" }, "swappiness":{ "shape":"Integer", - "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

" + "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" } }, "documentation":"

Linux-specific modifications that are applied to the container, such as details for device mappings.

" @@ -1562,7 +1688,7 @@ "members":{ "jobQueue":{ "shape":"String", - "documentation":"

The name or full Amazon Resource Name (ARN) of the job queue with which to list jobs.

" + "documentation":"

The name or full Amazon Resource Name (ARN) of the job queue used to list jobs.

" }, "arrayJobId":{ "shape":"String", @@ -1574,11 +1700,11 @@ }, "jobStatus":{ "shape":"JobStatus", - "documentation":"

The job status with which to filter jobs in the specified queue. If you do not specify a status, only RUNNING jobs are returned.

" + "documentation":"

The job status used to filter jobs in the specified queue. If you don't specify a status, only RUNNING jobs are returned.

" }, "maxResults":{ "shape":"Integer", - "documentation":"

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListJobs returns up to 100 results and a nextToken value if applicable.

" + "documentation":"

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable.

" }, "nextToken":{ "shape":"String", @@ -1606,7 +1732,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "location":"uri", "locationName":"resourceArn" } @@ -1627,7 +1753,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that is not listed earlier that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + "documentation":"

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

Jobs running on Fargate resources are restricted to the awslogs and splunk log drivers.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that isn't listed earlier that you want to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you want to have included. However, Amazon Web Services doesn't currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -1663,11 +1789,11 @@ "members":{ "containerPath":{ "shape":"String", - "documentation":"

The path on the container at which to mount the host volume.

" + "documentation":"

The path on the container where the host volume is mounted.

" }, "readOnly":{ "shape":"Boolean", - "documentation":"

If this value is true, the container has read-only access to the volume; otherwise, the container can write to the volume. The default value is false.

" + "documentation":"

If this value is true, the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is false.

" }, "sourceVolume":{ "shape":"String", @@ -1680,6 +1806,16 @@ "type":"list", "member":{"shape":"MountPoint"} }, + "NetworkConfiguration":{ + "type":"structure", + "members":{ + "assignPublicIp":{ + "shape":"AssignPublicIp", + "documentation":"

Indicates whether the job should have a public IP address. For a job running on Fargate resources in a private subnet to send outbound traffic to the internet (for example, to pull container images), the private subnet requires that a NAT gateway be attached to route requests to the internet. For more information, see Amazon ECS task networking. The default value is \"DISABLED\".

" + } + }, + "documentation":"

The network configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" + }, "NetworkInterface":{ "type":"structure", "members":{ @@ -1728,7 +1864,7 @@ "documentation":"

The node property overrides for the job.

" } }, - "documentation":"

Object representing any node overrides to a job definition that is used in a SubmitJob API operation.

" + "documentation":"

Object representing any node overrides to a job definition that is used in a SubmitJob API operation.

This isn't applicable to jobs running on Fargate resources and shouldn't be provided; use containerOverrides instead.

" }, "NodeProperties":{ "type":"structure", @@ -1777,7 +1913,7 @@ "members":{ "targetNodes":{ "shape":"String", - "documentation":"

The range of nodes, using node index values, with which to override. A range of 0:3 indicates nodes with index values of 0 through 3. If the starting range value is omitted (:n), then 0 is used to start the range. If the ending range value is omitted (n:), then the highest possible node index is used to end the range.

" + "documentation":"

The range of nodes, using node index values, that the overrides apply to. A range of 0:3 indicates nodes with index values of 0 through 3. If the starting range value is omitted (:n), then 0 is used to start the range. If the ending range value is omitted (n:), then the highest possible node index is used to end the range.

" }, "containerOverrides":{ "shape":"ContainerOverrides", @@ -1800,7 +1936,7 @@ "members":{ "targetNodes":{ "shape":"String", - "documentation":"

The range of nodes, using node index values. A range of 0:3 indicates nodes with index values of 0 through 3. If the starting range value is omitted (:n), then 0 is used to start the range. If the ending range value is omitted (n:), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes (0:n). You may nest node ranges, for example 0:10 and 4:5, in which case the 4:5 range properties override the 0:10 properties.

" + "documentation":"

The range of nodes, using node index values. A range of 0:3 indicates nodes with index values of 0 through 3. If the starting range value is omitted (:n), then 0 is used to start the range. If the ending range value is omitted (n:), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes (0:n). You may nest node ranges, for example 0:10 and 4:5, in which case the 4:5 range properties override the 0:10 properties.

" }, "container":{ "shape":"ContainerProperties", @@ -1814,6 +1950,17 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "PlatformCapability":{ + "type":"string", + "enum":[ + "EC2", + "FARGATE" + ] + }, + "PlatformCapabilityList":{ + "type":"list", + "member":{"shape":"PlatformCapability"} + }, "RegisterJobDefinitionRequest":{ "type":"structure", "required":[ @@ -1827,7 +1974,7 @@ }, "type":{ "shape":"JobDefinitionType", - "documentation":"

The type of job definition.

" + "documentation":"

The type of job definition. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the AWS Batch User Guide.

If the job is run on Fargate resources, then multinode isn't supported.

" }, "parameters":{ "shape":"ParametersMap", @@ -1835,23 +1982,31 @@ }, "containerProperties":{ "shape":"ContainerProperties", - "documentation":"

An object with various properties specific to single-node container-based jobs. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.

" + "documentation":"

An object with various properties specific to single-node container-based jobs. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.

If the job runs on Fargate resources, then you must not specify nodeProperties; use only containerProperties.

" }, "nodeProperties":{ "shape":"NodeProperties", - "documentation":"

An object with various properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the AWS Batch User Guide. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.

" + "documentation":"

An object with various properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the AWS Batch User Guide. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.

If the job runs on Fargate resources, then you must not specify nodeProperties; use containerProperties instead.

" }, "retryStrategy":{ "shape":"RetryStrategy", - "documentation":"

The retry strategy to use for failed jobs that are submitted with this job definition. Any retry strategy that is specified during a SubmitJob operation overrides the retry strategy defined here. If a job is terminated due to a timeout, it is not retried.

" + "documentation":"

The retry strategy to use for failed jobs that are submitted with this job definition. Any retry strategy that is specified during a SubmitJob operation overrides the retry strategy defined here. If a job is terminated due to a timeout, it isn't retried.

" + }, + "propagateTags":{ + "shape":"Boolean", + "documentation":"

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definition tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

" }, "timeout":{ "shape":"JobTimeout", - "documentation":"

The timeout configuration for jobs that are submitted with this job definition, after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it is not retried. The minimum value for the timeout is 60 seconds. Any timeout configuration that is specified during a SubmitJob operation overrides the timeout configuration defined here. For more information, see Job Timeouts in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The timeout configuration for jobs that are submitted with this job definition, after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. Any timeout configuration that is specified during a SubmitJob operation overrides the timeout configuration defined here. For more information, see Job Timeouts in the AWS Batch User Guide.

" }, "tags":{ "shape":"TagrisTagsMap", - "documentation":"

The tags that you apply to the job definition to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General Reference.

" + "documentation":"

The tags that you apply to the job definition to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS Batch User Guide.

" + }, + "platformCapabilities":{ + "shape":"PlatformCapabilityList", + "documentation":"

The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. To run the job on Fargate resources, specify FARGATE.

" } } }, @@ -1886,14 +2041,14 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job should not exceed the number of available GPUs on the compute resource that the job is launched on.

" + "documentation":"

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs running on Fargate resources.

type=\"MEMORY\"

For jobs running on EC2 resources, the hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

For jobs running on Fargate resources, the value is the hard limit (in GiB), represented in decimal form, and must match one of the supported values (0.5 and whole numbers between 1 and 30, inclusive). The VCPU value must be one of the values supported for that memory value.

value = 0.5

VCPU = 0.25

value = 1

VCPU = 0.25 or 0.5

value = 2

VCPU = 0.25, 0.5, or 1

value = 3

VCPU = 0.5 or 1

value = 4

VCPU = 0.5, 1, or 2

value = 5, 6, or 7

VCPU = 1 or 2

value = 8

VCPU = 1, 2, or 4

value = 9, 10, 11, 12, 13, 14, 15, or 16

VCPU = 2 or 4

value = 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, or 30

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs running on Fargate resources, the value must match one of the supported values, and the MEMORY value must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4.

value = 0.25

MEMORY = 0.5, 1, or 2

value = 0.5

MEMORY = 1, 2, 3, or 4

value = 1

MEMORY = 2, 3, 4, 5, 6, 7, or 8

value = 2

MEMORY = 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, or 16

value = 4

MEMORY = 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, or 30

" }, "type":{ "shape":"ResourceType", - "documentation":"

The type of resource to assign to a container. Currently, the only supported resource type is GPU.

" + "documentation":"

The type of resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU.

" } }, - "documentation":"

The type and amount of a resource to assign to a container. Currently, the only supported resource type is GPU.

" + "documentation":"

The type and amount of a resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU.

" }, "ResourceRequirements":{ "type":"list", @@ -1901,7 +2056,18 @@ }, "ResourceType":{ "type":"string", - "enum":["GPU"] + "enum":[ + "GPU", + "VCPU", + "MEMORY" + ] + }, + "RetryAction":{ + "type":"string", + "enum":[ + "RETRY", + "EXIT" + ] }, "RetryStrategy":{ "type":"structure", @@ -1909,9 +2075,13 @@ "attempts":{ "shape":"Integer", "documentation":"

The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value.

" + }, + "evaluateOnExit":{ + "shape":"EvaluateOnExitList", + "documentation":"

Array of up to 5 objects that specify conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified.

" } }, - "documentation":"

The retry strategy associated with a job.

" + "documentation":"

The retry strategy associated with a job. For more information, see Automated job retries in the AWS Batch User Guide.

" }, "Secret":{ "type":"structure", @@ -1926,10 +2096,10 @@ }, "valueFrom":{ "shape":"String", - "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the task you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" + "documentation":"

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

" } }, - "documentation":"

An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:

  • To inject sensitive data into your containers as environment variables, use the secrets container definition parameter.

  • To reference sensitive information in the log configuration of a container, use the secretOptions container definition parameter.

For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:

  • To inject sensitive data into your containers as environment variables, use the secrets container definition parameter.

  • To reference sensitive information in the log configuration of a container, use the secretOptions container definition parameter.

For more information, see Specifying sensitive data in the AWS Batch User Guide.
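
A brief, hedged sketch of wiring a Secrets Manager secret into a container definition through the Batch model is shown here; the image, secret name, and ARN are placeholders, and the secrets member on ContainerProperties is assumed from the SecretList shape in this model.

    import software.amazon.awssdk.services.batch.model.ContainerProperties;
    import software.amazon.awssdk.services.batch.model.Secret;

    public class SecretExample {
        public static void main(String[] args) {
            ContainerProperties container = ContainerProperties.builder()
                    .image("my-app:latest") // placeholder image
                    .secrets(Secret.builder()
                            .name("DB_PASSWORD") // exposed to the container as an environment variable
                            .valueFrom("arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/db-password") // placeholder ARN
                            .build())
                    .build();
            System.out.println(container);
        }
    }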

" }, "SecretList":{ "type":"list", @@ -1988,15 +2158,19 @@ }, "nodeOverrides":{ "shape":"NodeOverrides", - "documentation":"

A list of node overrides in JSON format that specify the node range to target and the container overrides for that node range.

" + "documentation":"

A list of node overrides in JSON format that specify the node range to target and the container overrides for that node range.

This parameter isn't applicable to jobs running on Fargate resources; use containerOverrides instead.

" }, "retryStrategy":{ "shape":"RetryStrategy", "documentation":"

The retry strategy to use for failed jobs from this SubmitJob operation. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition.

" }, + "propagateTags":{ + "shape":"Boolean", + "documentation":"

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definition tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state. When specified, this overrides the tag propagation setting in the job definition.

" + }, "timeout":{ "shape":"JobTimeout", - "documentation":"

The timeout configuration for this SubmitJob operation. You can specify a timeout duration after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it is not retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job. For more information, see Job Timeouts in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The timeout configuration for this SubmitJob operation. You can specify a timeout duration after which AWS Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job. For more information, see Job Timeouts in the Amazon Elastic Container Service Developer Guide.
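
To show how the new propagateTags member and the existing timeout fit together on a request, here is a minimal AWS SDK for Java v2 sketch; the job name, queue, and definition are placeholders.

    import software.amazon.awssdk.services.batch.BatchClient;
    import software.amazon.awssdk.services.batch.model.JobTimeout;
    import software.amazon.awssdk.services.batch.model.SubmitJobRequest;

    public class SubmitJobExample {
        public static void main(String[] args) {
            BatchClient batch = BatchClient.create();
            batch.submitJob(SubmitJobRequest.builder()
                    .jobName("nightly-report")        // placeholder
                    .jobQueue("default-queue")        // placeholder
                    .jobDefinition("fargate-example") // placeholder
                    .propagateTags(true)              // copy job/job-definition tags to the ECS task
                    .timeout(JobTimeout.builder()
                            .attemptDurationSeconds(600) // the minimum allowed is 60 seconds
                            .build())
                    .build());
        }
    }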

" }, "tags":{ "shape":"TagrisTagsMap", @@ -2045,7 +2219,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the resource to which to add tags. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource that tags are added to. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "location":"uri", "locationName":"resourceArn" }, @@ -2107,7 +2281,7 @@ "members":{ "containerPath":{ "shape":"String", - "documentation":"

The absolute file path in the container where the tmpfs volume is to be mounted.

" + "documentation":"

The absolute file path in the container where the tmpfs volume is mounted.

" }, "size":{ "shape":"Integer", @@ -2118,7 +2292,7 @@ "documentation":"

The list of tmpfs volume mount options.

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" | \"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" | \"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" | \"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\" | \"nr_inodes\" | \"nr_blocks\" | \"mpol\"

" } }, - "documentation":"

The container path, mount options, and size of the tmpfs mount.

" + "documentation":"

The container path, mount options, and size of the tmpfs mount.

This object isn't applicable to jobs running on Fargate resources.
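
For EC2-backed jobs, a small sketch of how the Tmpfs and Ulimit settings could be attached to a container definition follows; it assumes the Tmpfs list hangs off the LinuxParameters member of ContainerProperties, and the paths, sizes, and limits are placeholders.

    import software.amazon.awssdk.services.batch.model.ContainerProperties;
    import software.amazon.awssdk.services.batch.model.LinuxParameters;
    import software.amazon.awssdk.services.batch.model.Tmpfs;
    import software.amazon.awssdk.services.batch.model.Ulimit;

    public class TmpfsUlimitExample {
        public static void main(String[] args) {
            ContainerProperties container = ContainerProperties.builder()
                    .image("my-app:latest") // placeholder image
                    // tmpfs scratch space; not applicable to Fargate jobs, as noted above
                    .linuxParameters(LinuxParameters.builder()
                            .tmpfs(Tmpfs.builder()
                                    .containerPath("/scratch")
                                    .size(64)                     // size in MiB
                                    .mountOptions("rw", "noexec")
                                    .build())
                            .build())
                    // raise the open-file limit for the container
                    .ulimits(Ulimit.builder()
                            .name("nofile")
                            .softLimit(1024)
                            .hardLimit(4096)
                            .build())
                    .build();
            System.out.println(container);
        }
    }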

" }, "TmpfsList":{ "type":"list", @@ -2145,7 +2319,7 @@ "documentation":"

The soft limit for the ulimit type.

" } }, - "documentation":"

The ulimit settings to pass to the container.

" + "documentation":"

The ulimit settings to pass to the container.

This object isn't applicable to jobs running on Fargate resources.

" }, "Ulimits":{ "type":"list", @@ -2195,7 +2369,7 @@ }, "serviceRole":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN may contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" + "documentation":"

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

" } } }, @@ -2230,7 +2404,7 @@ }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", - "documentation":"

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should execute a given job.

" + "documentation":"

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should run a given job. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

" } } }, @@ -2252,7 +2426,7 @@ "members":{ "host":{ "shape":"Host", - "documentation":"

The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop running.

" + "documentation":"

The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers associated with it stop running.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

" }, "name":{ "shape":"String", @@ -2266,5 +2440,5 @@ "member":{"shape":"Volume"} } }, - "documentation":"

AWS Batch enables you to run batch computing workloads on the AWS Cloud. Batch computing is a common way for developers, scientists, and engineers to access large amounts of compute resources, and AWS Batch removes the undifferentiated heavy lifting of configuring and managing the required infrastructure. AWS Batch will be familiar to users of traditional batch computing software. This service can efficiently provision resources in response to jobs submitted in order to eliminate capacity constraints, reduce compute costs, and deliver results quickly.

As a fully managed service, AWS Batch enables developers, scientists, and engineers to run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes the workload distribution based on the quantity and scale of the workloads. With AWS Batch, there is no need to install or manage batch computing software, which allows you to focus on analyzing results and solving problems. AWS Batch reduces operational complexities, saves time, and reduces costs, which makes it easy for developers, scientists, and engineers to run their batch jobs in the AWS Cloud.

" + "documentation":"

Using AWS Batch, you can run batch computing workloads on the AWS Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. AWS Batch removes the undifferentiated heavy lifting of configuring and managing the required infrastructure while providing a familiar batch computing approach. With these advantages, AWS Batch can help you efficiently provision resources in response to submitted jobs, effectively eliminating capacity constraints, reducing compute costs, and delivering your results more quickly.

As a fully managed service, AWS Batch can run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With AWS Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

" } diff --git a/services/braket/pom.xml b/services/braket/pom.xml index 6b3b9839f07f..8670f963bd85 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/braket/src/main/resources/codegen-resources/service-2.json b/services/braket/src/main/resources/codegen-resources/service-2.json index cc5534c0f7f7..4f5b0c78b3af 100644 --- a/services/braket/src/main/resources/codegen-resources/service-2.json +++ b/services/braket/src/main/resources/codegen-resources/service-2.json @@ -87,6 +87,22 @@ ], "documentation":"

Retrieves the specified quantum task.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Shows the tags associated with this resource.

" + }, "SearchDevices":{ "name":"SearchDevices", "http":{ @@ -120,6 +136,39 @@ {"shape":"ValidationException"} ], "documentation":"

Searches for tasks that match the specified filter values.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Adds a tag to the specified resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes tags from a resource.
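
Since the TagResource, UntagResource, and ListTagsForResource shapes are fully defined in this model, a minimal AWS SDK for Java v2 sketch of the new Braket tagging round trip is shown here; the quantum task ARN and tag values are placeholders.

    import java.util.Collections;
    import software.amazon.awssdk.services.braket.BraketClient;
    import software.amazon.awssdk.services.braket.model.ListTagsForResourceRequest;
    import software.amazon.awssdk.services.braket.model.TagResourceRequest;
    import software.amazon.awssdk.services.braket.model.UntagResourceRequest;

    public class BraketTaggingExample {
        public static void main(String[] args) {
            BraketClient braket = BraketClient.create();
            String taskArn = "arn:aws:braket:us-east-1:123456789012:quantum-task/example"; // placeholder ARN

            // Add a tag to the quantum task.
            braket.tagResource(TagResourceRequest.builder()
                    .resourceArn(taskArn)
                    .tags(Collections.singletonMap("project", "qpu-benchmarks"))
                    .build());

            // List the tags that were just added.
            braket.listTagsForResource(ListTagsForResourceRequest.builder()
                    .resourceArn(taskArn)
                    .build())
                    .tags()
                    .forEach((key, value) -> System.out.println(key + "=" + value));

            // Remove the tag again by key.
            braket.untagResource(UntagResourceRequest.builder()
                    .resourceArn(taskArn)
                    .tagKeys("project")
                    .build());
        }
    }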

", + "idempotent":true } }, "shapes":{ @@ -175,8 +224,8 @@ "CancellationStatus":{ "type":"string", "enum":[ - "CANCELLED", - "CANCELLING" + "CANCELLING", + "CANCELLED" ] }, "ConflictException":{ @@ -217,40 +266,44 @@ "documentation":"

The ARN of the device to run the task on.

" }, "deviceParameters":{ - "shape":"CreateQuantumTaskRequestdeviceParametersJsonValue", + "shape":"CreateQuantumTaskRequestDeviceParametersString", "documentation":"

The parameters for the device to run the task on.

", "jsonvalue":true }, "outputS3Bucket":{ - "shape":"CreateQuantumTaskRequestoutputS3BucketString", + "shape":"CreateQuantumTaskRequestOutputS3BucketString", "documentation":"

The S3 bucket to store task result files in.

" }, "outputS3KeyPrefix":{ - "shape":"CreateQuantumTaskRequestoutputS3KeyPrefixString", + "shape":"CreateQuantumTaskRequestOutputS3KeyPrefixString", "documentation":"

The key prefix for the location in the S3 bucket to store task results in.

" }, "shots":{ - "shape":"CreateQuantumTaskRequestshotsLong", + "shape":"CreateQuantumTaskRequestShotsLong", "documentation":"

The number of shots to use for the task.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags to be added to the quantum task you're creating.

" } } }, - "CreateQuantumTaskRequestdeviceParametersJsonValue":{ + "CreateQuantumTaskRequestDeviceParametersString":{ "type":"string", "max":2048, "min":1 }, - "CreateQuantumTaskRequestoutputS3BucketString":{ + "CreateQuantumTaskRequestOutputS3BucketString":{ "type":"string", "max":63, "min":3 }, - "CreateQuantumTaskRequestoutputS3KeyPrefixString":{ + "CreateQuantumTaskRequestOutputS3KeyPrefixString":{ "type":"string", "max":1024, "min":1 }, - "CreateQuantumTaskRequestshotsLong":{ + "CreateQuantumTaskRequestShotsLong":{ "type":"long", "box":true, "min":0 @@ -285,8 +338,8 @@ "DeviceStatus":{ "type":"string", "enum":[ - "OFFLINE", - "ONLINE" + "ONLINE", + "OFFLINE" ] }, "DeviceSummary":{ @@ -448,6 +501,10 @@ "status":{ "shape":"QuantumTaskStatus", "documentation":"

The status of the task.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

The tags that belong to this task.

" } } }, @@ -456,12 +513,33 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The request processing has failed because of an unknown error, exception or failure.

", + "documentation":"

The request processing has failed because of an unknown error, exception, or failure.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true }, "JsonValue":{"type":"string"}, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

Specify the resourceArn of the resource whose tags you want to display.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagsMap", + "documentation":"

Displays the key-value pairs of the tags associated with this resource.

" + } + } + }, "Long":{ "type":"long", "box":true @@ -474,13 +552,13 @@ "QuantumTaskStatus":{ "type":"string", "enum":[ - "CANCELLED", - "CANCELLING", - "COMPLETED", "CREATED", - "FAILED", "QUEUED", - "RUNNING" + "RUNNING", + "COMPLETED", + "FAILED", + "CANCELLING", + "CANCELLED" ] }, "QuantumTaskSummary":{ @@ -526,6 +604,10 @@ "status":{ "shape":"QuantumTaskStatus", "documentation":"

The status of the task.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Displays the key-value pairs of the tags associated with this quantum task.

" } }, "documentation":"

Includes information about a quantum task.

" @@ -554,22 +636,22 @@ ], "members":{ "name":{ - "shape":"SearchDevicesFilternameString", + "shape":"SearchDevicesFilterNameString", "documentation":"

The name to use to filter results.

" }, "values":{ - "shape":"SearchDevicesFiltervaluesString256List", + "shape":"SearchDevicesFilterValuesList", "documentation":"

The values to use to filter results.

" } }, "documentation":"

The filter to use for searching devices.

" }, - "SearchDevicesFilternameString":{ + "SearchDevicesFilterNameString":{ "type":"string", "max":64, "min":1 }, - "SearchDevicesFiltervaluesString256List":{ + "SearchDevicesFilterValuesList":{ "type":"list", "member":{"shape":"String256"}, "max":10, @@ -580,11 +662,11 @@ "required":["filters"], "members":{ "filters":{ - "shape":"SearchDevicesRequestfiltersSearchDevicesFilterList", + "shape":"SearchDevicesRequestFiltersList", "documentation":"

The filter values to use to search for a device.

" }, "maxResults":{ - "shape":"SearchDevicesRequestmaxResultsInteger", + "shape":"SearchDevicesRequestMaxResultsInteger", "documentation":"

The maximum number of results to return in the response.

" }, "nextToken":{ @@ -593,13 +675,13 @@ } } }, - "SearchDevicesRequestfiltersSearchDevicesFilterList":{ + "SearchDevicesRequestFiltersList":{ "type":"list", "member":{"shape":"SearchDevicesFilter"}, "max":10, "min":0 }, - "SearchDevicesRequestmaxResultsInteger":{ + "SearchDevicesRequestMaxResultsInteger":{ "type":"integer", "box":true, "max":100, @@ -636,7 +718,7 @@ "documentation":"

An operator to use in the filter.

" }, "values":{ - "shape":"SearchQuantumTasksFiltervaluesString256List", + "shape":"SearchQuantumTasksFilterValuesList", "documentation":"

The values to use for the filter.

" } }, @@ -645,15 +727,15 @@ "SearchQuantumTasksFilterOperator":{ "type":"string", "enum":[ - "BETWEEN", + "LT", + "LTE", "EQUAL", "GT", "GTE", - "LT", - "LTE" + "BETWEEN" ] }, - "SearchQuantumTasksFiltervaluesString256List":{ + "SearchQuantumTasksFilterValuesList":{ "type":"list", "member":{"shape":"String256"}, "max":10, @@ -664,11 +746,11 @@ "required":["filters"], "members":{ "filters":{ - "shape":"SearchQuantumTasksRequestfiltersSearchQuantumTasksFilterList", + "shape":"SearchQuantumTasksRequestFiltersList", "documentation":"

Array of SearchQuantumTasksFilter objects.

" }, "maxResults":{ - "shape":"SearchQuantumTasksRequestmaxResultsInteger", + "shape":"SearchQuantumTasksRequestMaxResultsInteger", "documentation":"

Maximum number of results to return in the response.

" }, "nextToken":{ @@ -677,13 +759,13 @@ } } }, - "SearchQuantumTasksRequestfiltersSearchQuantumTasksFilterList":{ + "SearchQuantumTasksRequestFiltersList":{ "type":"list", "member":{"shape":"SearchQuantumTasksFilter"}, "max":10, "min":0 }, - "SearchQuantumTasksRequestmaxResultsInteger":{ + "SearchQuantumTasksRequestMaxResultsInteger":{ "type":"integer", "box":true, "max":100, @@ -708,7 +790,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The request failed because a service quota is met.

", + "documentation":"

The request failed because a service quota is exceeded.

", "error":{ "httpStatusCode":402, "senderFault":true @@ -730,6 +812,39 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

Specify the resourceArn of the resource to which a tag will be added.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Specify the tags to add to the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -742,6 +857,32 @@ }, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

Specify the resourceArn for the resource from which to remove the tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

Specify the keys for the tags to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 22c98724109f..3c65f167ef37 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/budgets/src/main/resources/codegen-resources/paginators-1.json b/services/budgets/src/main/resources/codegen-resources/paginators-1.json index 03c71c8e45e1..d317a71f3c6c 100644 --- a/services/budgets/src/main/resources/codegen-resources/paginators-1.json +++ b/services/budgets/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,23 @@ { "pagination": { + "DescribeBudgetActionHistories": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ActionHistories" + }, + "DescribeBudgetActionsForAccount": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Actions" + }, + "DescribeBudgetActionsForBudget": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Actions" + }, "DescribeBudgetPerformanceHistory": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/budgets/src/main/resources/codegen-resources/service-2.json b/services/budgets/src/main/resources/codegen-resources/service-2.json index 2fb162a8e58d..cd5ecf7c328f 100755 --- a/services/budgets/src/main/resources/codegen-resources/service-2.json +++ b/services/budgets/src/main/resources/codegen-resources/service-2.json @@ -30,6 +30,24 @@ ], "documentation":"

Creates a budget and, if included, notifications and subscribers.

Only one of BudgetLimit or PlannedBudgetLimits can be present in the syntax at one time. Use the syntax that matches your case. The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples section.

" }, + "CreateBudgetAction":{ + "name":"CreateBudgetAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBudgetActionRequest"}, + "output":{"shape":"CreateBudgetActionResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"CreationLimitExceededException"}, + {"shape":"DuplicateRecordException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a budget action.

" + }, "CreateNotification":{ "name":"CreateNotification", "http":{ @@ -82,6 +100,23 @@ ], "documentation":"

Deletes a budget. You can delete your budget at any time.

Deleting a budget also deletes the notifications and subscribers that are associated with that budget.

" }, + "DeleteBudgetAction":{ + "name":"DeleteBudgetAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteBudgetActionRequest"}, + "output":{"shape":"DeleteBudgetActionResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceLockedException"} + ], + "documentation":"

Deletes a budget action.

" + }, "DeleteNotification":{ "name":"DeleteNotification", "http":{ @@ -130,6 +165,72 @@ ], "documentation":"

Describes a budget.

The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples section.

" }, + "DescribeBudgetAction":{ + "name":"DescribeBudgetAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBudgetActionRequest"}, + "output":{"shape":"DescribeBudgetActionResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes a budget action detail.

" + }, + "DescribeBudgetActionHistories":{ + "name":"DescribeBudgetActionHistories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBudgetActionHistoriesRequest"}, + "output":{"shape":"DescribeBudgetActionHistoriesResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Describes a budget action history detail.

" + }, + "DescribeBudgetActionsForAccount":{ + "name":"DescribeBudgetActionsForAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBudgetActionsForAccountRequest"}, + "output":{"shape":"DescribeBudgetActionsForAccountResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Describes all of the budget actions for an account.

" + }, + "DescribeBudgetActionsForBudget":{ + "name":"DescribeBudgetActionsForBudget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBudgetActionsForBudgetRequest"}, + "output":{"shape":"DescribeBudgetActionsForBudgetResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Describes all of the budget actions for a budget.

" + }, "DescribeBudgetPerformanceHistory":{ "name":"DescribeBudgetPerformanceHistory", "http":{ @@ -202,6 +303,23 @@ ], "documentation":"

Lists the subscribers that are associated with a notification.

" }, + "ExecuteBudgetAction":{ + "name":"ExecuteBudgetAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteBudgetActionRequest"}, + "output":{"shape":"ExecuteBudgetActionResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceLockedException"} + ], + "documentation":"

Executes a budget action.

" + }, "UpdateBudget":{ "name":"UpdateBudget", "http":{ @@ -218,6 +336,23 @@ ], "documentation":"

Updates a budget. You can change every part of a budget except for the budgetName and the calculatedSpend. When you modify a budget, the calculatedSpend drops to zero until AWS has new usage data to use for forecasting.

Only one of BudgetLimit or PlannedBudgetLimits can be present in the syntax at one time. Use the syntax that matches your case. The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples section.

" }, + "UpdateBudgetAction":{ + "name":"UpdateBudgetAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBudgetActionRequest"}, + "output":{"shape":"UpdateBudgetActionResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidParameterException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceLockedException"} + ], + "documentation":"

Updates a budget action.

" + }, "UpdateNotification":{ "name":"UpdateNotification", "http":{ @@ -269,6 +404,162 @@ "min":12, "pattern":"\\d{12}" }, + "Action":{ + "type":"structure", + "required":[ + "ActionId", + "BudgetName", + "NotificationType", + "ActionType", + "ActionThreshold", + "Definition", + "ExecutionRoleArn", + "ApprovalModel", + "Status", + "Subscribers" + ], + "members":{ + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + }, + "BudgetName":{"shape":"BudgetName"}, + "NotificationType":{"shape":"NotificationType"}, + "ActionType":{ + "shape":"ActionType", + "documentation":"

The type of action. This defines the type of tasks that can be carried out by this action. This field also determines the format for definition.

" + }, + "ActionThreshold":{ + "shape":"ActionThreshold", + "documentation":"

The trigger threshold of the action.

" + }, + "Definition":{ + "shape":"Definition", + "documentation":"

Where you specify all of the type-specific parameters.

" + }, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The role passed for action execution and reversion. Roles and actions must be in the same account.

" + }, + "ApprovalModel":{ + "shape":"ApprovalModel", + "documentation":"

Specifies whether the action requires manual or automatic approval.

" + }, + "Status":{ + "shape":"ActionStatus", + "documentation":"

The status of the action.

" + }, + "Subscribers":{"shape":"Subscribers"} + }, + "documentation":"

A budget action resource.

" + }, + "ActionHistories":{ + "type":"list", + "member":{"shape":"ActionHistory"}, + "max":100, + "min":0 + }, + "ActionHistory":{ + "type":"structure", + "required":[ + "Timestamp", + "Status", + "EventType", + "ActionHistoryDetails" + ], + "members":{ + "Timestamp":{"shape":"GenericTimestamp"}, + "Status":{ + "shape":"ActionStatus", + "documentation":"

The status of the action at the time of the event.

" + }, + "EventType":{ + "shape":"EventType", + "documentation":"

Specifies whether the event was triggered by the user or generated by the system.

" + }, + "ActionHistoryDetails":{ + "shape":"ActionHistoryDetails", + "documentation":"

A description of the details of the event.

" + } + }, + "documentation":"

The historical records for a budget action.

" + }, + "ActionHistoryDetails":{ + "type":"structure", + "required":[ + "Message", + "Action" + ], + "members":{ + "Message":{"shape":"GenericString"}, + "Action":{ + "shape":"Action", + "documentation":"

The budget action resource.

" + } + }, + "documentation":"

A description of the details of the event.

" + }, + "ActionId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$" + }, + "ActionStatus":{ + "type":"string", + "enum":[ + "STANDBY", + "PENDING", + "EXECUTION_IN_PROGRESS", + "EXECUTION_SUCCESS", + "EXECUTION_FAILURE", + "REVERSE_IN_PROGRESS", + "REVERSE_SUCCESS", + "REVERSE_FAILURE", + "RESET_IN_PROGRESS", + "RESET_FAILURE" + ] + }, + "ActionSubType":{ + "type":"string", + "enum":[ + "STOP_EC2_INSTANCES", + "STOP_RDS_INSTANCES" + ] + }, + "ActionThreshold":{ + "type":"structure", + "required":[ + "ActionThresholdValue", + "ActionThresholdType" + ], + "members":{ + "ActionThresholdValue":{"shape":"NotificationThreshold"}, + "ActionThresholdType":{"shape":"ThresholdType"} + }, + "documentation":"

The trigger threshold of the action.

" + }, + "ActionType":{ + "type":"string", + "enum":[ + "APPLY_IAM_POLICY", + "APPLY_SCP_POLICY", + "RUN_SSM_DOCUMENTS" + ] + }, + "Actions":{ + "type":"list", + "member":{"shape":"Action"}, + "max":100, + "min":0 + }, + "ApprovalModel":{ + "type":"string", + "enum":[ + "AUTOMATIC", + "MANUAL" + ] + }, "Budget":{ "type":"structure", "required":[ @@ -467,6 +758,56 @@ }, "documentation":"

The types of cost that are included in a COST budget, such as tax and subscriptions.

USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE budgets do not have CostTypes.

" }, + "CreateBudgetActionRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "NotificationType", + "ActionType", + "ActionThreshold", + "Definition", + "ExecutionRoleArn", + "ApprovalModel", + "Subscribers" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "NotificationType":{"shape":"NotificationType"}, + "ActionType":{ + "shape":"ActionType", + "documentation":"

The type of action. This defines the type of tasks that can be carried out by this action. This field also determines the format for definition.

" + }, + "ActionThreshold":{"shape":"ActionThreshold"}, + "Definition":{"shape":"Definition"}, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The role passed for action execution and reversion. Roles and actions must be in the same account.

" + }, + "ApprovalModel":{ + "shape":"ApprovalModel", + "documentation":"

Specifies whether the action requires manual or automatic approval.

" + }, + "Subscribers":{"shape":"Subscribers"} + } + }, + "CreateBudgetActionResponse":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "ActionId" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + } + } + }, "CreateBudgetRequest":{ "type":"structure", "required":[ @@ -571,6 +912,53 @@ "documentation":"

You've exceeded the notification or subscriber limit.

", "exception":true }, + "Definition":{ + "type":"structure", + "members":{ + "IamActionDefinition":{ + "shape":"IamActionDefinition", + "documentation":"

The AWS Identity and Access Management (IAM) action definition details.

" + }, + "ScpActionDefinition":{ + "shape":"ScpActionDefinition", + "documentation":"

The service control policies (SCPs) action definition details.

" + }, + "SsmActionDefinition":{ + "shape":"SsmActionDefinition", + "documentation":"

The AWS Systems Manager (SSM) action definition details.

" + } + }, + "documentation":"

Specifies all of the type-specific parameters.
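
Pulling the new action shapes together, here is a hedged AWS SDK for Java v2 sketch of CreateBudgetAction that attaches an IAM policy when actual spend crosses 80% of a budget. All account IDs, names, ARNs, and addresses are placeholders, and the NotificationType and SubscriptionType enum values are taken from the existing Budgets model rather than this diff.

    import software.amazon.awssdk.services.budgets.BudgetsClient;
    import software.amazon.awssdk.services.budgets.model.ActionThreshold;
    import software.amazon.awssdk.services.budgets.model.ActionType;
    import software.amazon.awssdk.services.budgets.model.ApprovalModel;
    import software.amazon.awssdk.services.budgets.model.CreateBudgetActionRequest;
    import software.amazon.awssdk.services.budgets.model.Definition;
    import software.amazon.awssdk.services.budgets.model.IamActionDefinition;
    import software.amazon.awssdk.services.budgets.model.NotificationType;
    import software.amazon.awssdk.services.budgets.model.Subscriber;
    import software.amazon.awssdk.services.budgets.model.SubscriptionType;
    import software.amazon.awssdk.services.budgets.model.ThresholdType;

    public class CreateBudgetActionExample {
        public static void main(String[] args) {
            BudgetsClient budgets = BudgetsClient.create();

            String actionId = budgets.createBudgetAction(CreateBudgetActionRequest.builder()
                    .accountId("123456789012")              // placeholder account
                    .budgetName("monthly-cost-budget")      // placeholder budget
                    .notificationType(NotificationType.ACTUAL)
                    .actionType(ActionType.APPLY_IAM_POLICY)
                    .actionThreshold(ActionThreshold.builder()
                            .actionThresholdValue(80.0)
                            .actionThresholdType(ThresholdType.PERCENTAGE)
                            .build())
                    .definition(Definition.builder()
                            .iamActionDefinition(IamActionDefinition.builder()
                                    .policyArn("arn:aws:iam::123456789012:policy/DenyExpensiveOps") // placeholder
                                    .roles("developers")                                            // placeholder role name
                                    .build())
                            .build())
                    .executionRoleArn("arn:aws:iam::123456789012:role/BudgetsActionRole")           // placeholder
                    .approvalModel(ApprovalModel.AUTOMATIC)
                    .subscribers(Subscriber.builder()
                            .subscriptionType(SubscriptionType.EMAIL)
                            .address("finops@example.com")
                            .build())
                    .build())
                    .actionId();
            System.out.println("Created budget action " + actionId);
        }
    }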

" + }, + "DeleteBudgetActionRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "ActionId" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + } + } + }, + "DeleteBudgetActionResponse":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "Action" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "Action":{"shape":"Action"} + } + }, "DeleteBudgetRequest":{ "type":"structure", "required":[ @@ -658,6 +1046,112 @@ }, "documentation":"

Response of DeleteSubscriber

" }, + "DescribeBudgetActionHistoriesRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "ActionId" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + }, + "TimePeriod":{"shape":"TimePeriod"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"GenericString"} + } + }, + "DescribeBudgetActionHistoriesResponse":{ + "type":"structure", + "required":["ActionHistories"], + "members":{ + "ActionHistories":{ + "shape":"ActionHistories", + "documentation":"

The historical record of the budget action resource.

" + }, + "NextToken":{"shape":"GenericString"} + } + }, + "DescribeBudgetActionRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "ActionId" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + } + } + }, + "DescribeBudgetActionResponse":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "Action" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "Action":{ + "shape":"Action", + "documentation":"

A budget action resource.

" + } + } + }, + "DescribeBudgetActionsForAccountRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{"shape":"AccountId"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"GenericString"} + } + }, + "DescribeBudgetActionsForAccountResponse":{ + "type":"structure", + "required":["Actions"], + "members":{ + "Actions":{ + "shape":"Actions", + "documentation":"

A list of information about the budget action resources.

" + }, + "NextToken":{"shape":"GenericString"} + } + }, + "DescribeBudgetActionsForBudgetRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"GenericString"} + } + }, + "DescribeBudgetActionsForBudgetResponse":{ + "type":"structure", + "required":["Actions"], + "members":{ + "Actions":{ + "shape":"Actions", + "documentation":"

A list of information about the budget action resources.

" + }, + "NextToken":{"shape":"GenericString"} + } + }, "DescribeBudgetPerformanceHistoryRequest":{ "type":"structure", "required":[ @@ -843,6 +1337,67 @@ "documentation":"

The budget name already exists. Budget names must be unique within an account.

", "exception":true }, + "EventType":{ + "type":"string", + "enum":[ + "SYSTEM", + "CREATE_ACTION", + "DELETE_ACTION", + "UPDATE_ACTION", + "EXECUTE_ACTION" + ] + }, + "ExecuteBudgetActionRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "ActionId", + "ExecutionType" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + }, + "ExecutionType":{ + "shape":"ExecutionType", + "documentation":"

The type of execution.

" + } + } + }, + "ExecuteBudgetActionResponse":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "ActionId", + "ExecutionType" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + }, + "ExecutionType":{ + "shape":"ExecutionType", + "documentation":"

The type of execution.
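
A short sketch of approving a pending action with ExecuteBudgetAction follows; the account ID, budget name, and action ID are placeholders, and the execution type comes from the ExecutionType enum added in this model.

    import software.amazon.awssdk.services.budgets.BudgetsClient;
    import software.amazon.awssdk.services.budgets.model.ExecuteBudgetActionRequest;
    import software.amazon.awssdk.services.budgets.model.ExecutionType;

    public class ExecuteBudgetActionExample {
        public static void main(String[] args) {
            BudgetsClient budgets = BudgetsClient.create();
            // Approve an action that is waiting for manual approval (IDs are placeholders).
            budgets.executeBudgetAction(ExecuteBudgetActionRequest.builder()
                    .accountId("123456789012")
                    .budgetName("monthly-cost-budget")
                    .actionId("11111111-2222-3333-4444-555555555555")
                    .executionType(ExecutionType.APPROVE_BUDGET_ACTION)
                    .build());
        }
    }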

" + } + } + }, + "ExecutionType":{ + "type":"string", + "enum":[ + "APPROVE_BUDGET_ACTION", + "RETRY_BUDGET_ACTION", + "REVERSE_BUDGET_ACTION", + "RESET_BUDGET_ACTION" + ] + }, "ExpiredNextTokenException":{ "type":"structure", "members":{ @@ -862,6 +1417,53 @@ "type":"timestamp", "documentation":"

A generic time stamp. In Java, it is transformed to a Date object.

" }, + "Group":{ + "type":"string", + "max":640, + "min":1, + "pattern":"^([\\u0021-\\u007F]+\\u002F)?[\\w+=,.@-]+$" + }, + "Groups":{ + "type":"list", + "member":{"shape":"Group"}, + "max":100, + "min":1 + }, + "IamActionDefinition":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{ + "shape":"PolicyArn", + "documentation":"

The Amazon Resource Name (ARN) of the policy to be attached.

" + }, + "Roles":{ + "shape":"Roles", + "documentation":"

A list of roles to be attached. There must be at least one role.

" + }, + "Groups":{ + "shape":"Groups", + "documentation":"

A list of groups to be attached. There must be at least one group.

" + }, + "Users":{ + "shape":"Users", + "documentation":"

A list of users to be attached. There must be at least one user.

" + } + }, + "documentation":"

The AWS Identity and Access Management (IAM) action definition details.

" + }, + "InstanceId":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^i-(\\w{8}|\\w{17})$|^[a-zA-Z]([\\w-]{0,61}\\w)?$" + }, + "InstanceIds":{ + "type":"list", + "member":{"shape":"InstanceId"}, + "max":100, + "min":1 + }, "InternalErrorException":{ "type":"structure", "members":{ @@ -941,7 +1543,7 @@ }, "NotificationThreshold":{ "type":"double", - "documentation":"

The threshold of a notification. It must be a number between 0 and 1,000,000,000.

", + "documentation":"

The threshold of a notification.

", "max":40000000000, "min":0 }, @@ -998,6 +1600,68 @@ "key":{"shape":"GenericString"}, "value":{"shape":"Spend"} }, + "PolicyArn":{ + "type":"string", + "max":684, + "min":25, + "pattern":"^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::(\\d{12}|aws):policy(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$" + }, + "PolicyId":{ + "type":"string", + "max":130, + "min":10, + "pattern":"^p-[0-9a-zA-Z_]{8,128}$" + }, + "Region":{ + "type":"string", + "max":20, + "min":9, + "pattern":"^\\w{2}-\\w+(-\\w+)?-\\d$" + }, + "ResourceLockedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

The request was received and recognized by the server, but the server rejected that particular method for the requested resource.

", + "exception":true + }, + "Role":{ + "type":"string", + "max":576, + "min":1, + "pattern":"^([\\u0021-\\u007F]+\\u002F)?[\\w+=,.@-]+$" + }, + "RoleArn":{ + "type":"string", + "max":618, + "min":32, + "pattern":"^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$" + }, + "Roles":{ + "type":"list", + "member":{"shape":"Role"}, + "max":100, + "min":1 + }, + "ScpActionDefinition":{ + "type":"structure", + "required":[ + "PolicyId", + "TargetIds" + ], + "members":{ + "PolicyId":{ + "shape":"PolicyId", + "documentation":"

The policy ID attached.

" + }, + "TargetIds":{ + "shape":"TargetIds", + "documentation":"

A list of target IDs.

" + } + }, + "documentation":"

The service control policies (SCP) action definition details.

" + }, "Spend":{ "type":"structure", "required":[ @@ -1016,6 +1680,29 @@ }, "documentation":"

The amount of cost or usage that is measured for a budget.

For example, a Spend for 3 GB of S3 usage would have the following parameters:

  • An Amount of 3

  • A unit of GB

" }, + "SsmActionDefinition":{ + "type":"structure", + "required":[ + "ActionSubType", + "Region", + "InstanceIds" + ], + "members":{ + "ActionSubType":{ + "shape":"ActionSubType", + "documentation":"

The action subType.

" + }, + "Region":{ + "shape":"Region", + "documentation":"

The Region to run the SSM document.

" + }, + "InstanceIds":{ + "shape":"InstanceIds", + "documentation":"

The EC2 and RDS instance IDs.

" + } + }, + "documentation":"

The AWS Systems Manager (SSM) action definition details.
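
For the RUN_SSM_DOCUMENTS action type, a minimal sketch of the SsmActionDefinition variant of Definition is shown here; the Region and instance IDs are placeholders.

    import software.amazon.awssdk.services.budgets.model.ActionSubType;
    import software.amazon.awssdk.services.budgets.model.Definition;
    import software.amazon.awssdk.services.budgets.model.SsmActionDefinition;

    public class SsmDefinitionExample {
        public static void main(String[] args) {
            // Stop two EC2 instances in us-east-1 when the action fires (instance IDs are placeholders).
            Definition definition = Definition.builder()
                    .ssmActionDefinition(SsmActionDefinition.builder()
                            .actionSubType(ActionSubType.STOP_EC2_INSTANCES)
                            .region("us-east-1")
                            .instanceIds("i-0123456789abcdef0", "i-0fedcba9876543210")
                            .build())
                    .build();
            System.out.println(definition);
        }
    }

This Definition would be passed to CreateBudgetAction with ActionType.RUN_SSM_DOCUMENTS, in the same way the IAM variant is shown earlier.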

" + }, "Subscriber":{ "type":"structure", "required":[ @@ -1057,9 +1744,21 @@ "EMAIL" ] }, + "TargetId":{ + "type":"string", + "max":68, + "min":12, + "pattern":"^(ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$)|(\\d{12})" + }, + "TargetIds":{ + "type":"list", + "member":{"shape":"TargetId"}, + "max":100, + "min":1 + }, "ThresholdType":{ "type":"string", - "documentation":"

The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE.

", + "documentation":"

The type of threshold for a notification.

", "enum":[ "PERCENTAGE", "ABSOLUTE_VALUE" @@ -1096,6 +1795,55 @@ "min":1, "pattern":".*" }, + "UpdateBudgetActionRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "ActionId" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A system-generated universally unique identifier (UUID) for the action.

" + }, + "NotificationType":{"shape":"NotificationType"}, + "ActionThreshold":{"shape":"ActionThreshold"}, + "Definition":{"shape":"Definition"}, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The role passed for action execution and reversion. Roles and actions must be in the same account.

" + }, + "ApprovalModel":{ + "shape":"ApprovalModel", + "documentation":"

Specifies whether the action requires manual or automatic approval.

" + }, + "Subscribers":{"shape":"Subscribers"} + } + }, + "UpdateBudgetActionResponse":{ + "type":"structure", + "required":[ + "AccountId", + "BudgetName", + "OldAction", + "NewAction" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "BudgetName":{"shape":"BudgetName"}, + "OldAction":{ + "shape":"Action", + "documentation":"

The previous action resource information.

" + }, + "NewAction":{ + "shape":"Action", + "documentation":"

The updated action resource information.

" + } + } + }, "UpdateBudgetRequest":{ "type":"structure", "required":[ @@ -1193,6 +1941,18 @@ }, "documentation":"

Response of UpdateSubscriber

" }, + "User":{ + "type":"string", + "max":576, + "min":1, + "pattern":"^([\\u0021-\\u007F]+\\u002F)?[\\w+=,.@-]+$" + }, + "Users":{ + "type":"list", + "member":{"shape":"User"}, + "max":100, + "min":1 + }, "errorMessage":{ "type":"string", "documentation":"

The error message the exception carries.

" diff --git a/services/chime/pom.xml b/services/chime/pom.xml index defe837ae969..304eb42832f7 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chime/src/main/resources/codegen-resources/paginators-1.json b/services/chime/src/main/resources/codegen-resources/paginators-1.json index 6727698813d8..83b1912f0785 100644 --- a/services/chime/src/main/resources/codegen-resources/paginators-1.json +++ b/services/chime/src/main/resources/codegen-resources/paginators-1.json @@ -5,6 +5,21 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListAppInstanceAdmins": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAppInstanceUsers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAppInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListAttendees": { "input_token": "NextToken", "output_token": "NextToken", @@ -15,6 +30,41 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListChannelBans": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelMemberships": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelMembershipsForAppInstanceUser": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelMessages": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelModerators": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannels": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListChannelsModeratedByAppInstanceUser": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListMeetings": { "input_token": "NextToken", "output_token": "NextToken", @@ -45,6 +95,16 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListSipMediaApplications": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListSipRules": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListUsers": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/chime/src/main/resources/codegen-resources/service-2.json b/services/chime/src/main/resources/codegen-resources/service-2.json index 683ba7a416e1..d5f99007e5d4 100644 --- a/services/chime/src/main/resources/codegen-resources/service-2.json +++ b/services/chime/src/main/resources/codegen-resources/service-2.json @@ -254,6 +254,72 @@ ], "documentation":"

Creates an Amazon Chime account under the administrator's AWS account. Only Team account types are currently supported for this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" }, + "CreateAppInstance":{ + "name":"CreateAppInstance", + "http":{ + "method":"POST", + "requestUri":"/app-instances", + "responseCode":201 + }, + "input":{"shape":"CreateAppInstanceRequest"}, + "output":{"shape":"CreateAppInstanceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates an Amazon Chime Messaging SDK AppInstance under an AWS Account. Only Messaging SDK customers use this API. CreateAppInstance supports idempotency behavior as described in the AWS API Standard.

", + "endpoint":{"hostPrefix":"identity-"} + }, + "CreateAppInstanceAdmin":{ + "name":"CreateAppInstanceAdmin", + "http":{ + "method":"POST", + "requestUri":"/app-instances/{appInstanceArn}/admins", + "responseCode":201 + }, + "input":{"shape":"CreateAppInstanceAdminRequest"}, + "output":{"shape":"CreateAppInstanceAdminResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Promotes an AppInstanceUser to an AppInstanceAdmin. The promoted user can perform the following actions.

  • ChannelModerator actions across all channels in the app instance.

  • DeleteChannelMessage actions.

Only an AppInstanceUser can be promoted to an AppInstanceAdmin role.

", + "endpoint":{"hostPrefix":"identity-"} + }, + "CreateAppInstanceUser":{ + "name":"CreateAppInstanceUser", + "http":{ + "method":"POST", + "requestUri":"/app-instance-users", + "responseCode":201 + }, + "input":{"shape":"CreateAppInstanceUserRequest"}, + "output":{"shape":"CreateAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a user under an Amazon Chime AppInstance. The request consists of a unique appInstanceUserId and Name for that user.
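
As a rough, hedged sketch of the new identity APIs from the AWS SDK for Java v2, the following creates an AppInstance and then an AppInstanceUser under it. The request member names (name, clientRequestToken, appInstanceArn, appInstanceUserId) are not spelled out in this section and are assumptions; the names and IDs are placeholders.

    import java.util.UUID;
    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.CreateAppInstanceRequest;
    import software.amazon.awssdk.services.chime.model.CreateAppInstanceUserRequest;

    public class AppInstanceExample {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();

            // Idempotent creation of an app instance (names and IDs are placeholders).
            String appInstanceArn = chime.createAppInstance(CreateAppInstanceRequest.builder()
                    .name("demo-messaging-app")
                    .clientRequestToken(UUID.randomUUID().toString())
                    .build())
                    .appInstanceArn();

            // Create a user under the app instance with a unique ID and display name.
            chime.createAppInstanceUser(CreateAppInstanceUserRequest.builder()
                    .appInstanceArn(appInstanceArn)
                    .appInstanceUserId("user-0001")
                    .name("Demo User")
                    .clientRequestToken(UUID.randomUUID().toString())
                    .build());
        }
    }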

", + "endpoint":{"hostPrefix":"identity-"} + }, "CreateAttendee":{ "name":"CreateAttendee", "http":{ @@ -296,6 +362,94 @@ ], "documentation":"

Creates a bot for an Amazon Chime Enterprise account.

" }, + "CreateChannel":{ + "name":"CreateChannel", + "http":{ + "method":"POST", + "requestUri":"/channels", + "responseCode":201 + }, + "input":{"shape":"CreateChannelRequest"}, + "output":{"shape":"CreateChannelResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a channel to which you can add users and send messages.

Restriction: You can't change a channel's privacy.

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "CreateChannelBan":{ + "name":"CreateChannelBan", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/bans", + "responseCode":201 + }, + "input":{"shape":"CreateChannelBanRequest"}, + "output":{"shape":"CreateChannelBanResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Permanently bans a member from a channel. Moderators can't add banned members to a channel. To undo a ban, you first have to DeleteChannelBan, and then CreateChannelMembership. Bans are cleaned up when you delete users or channels.

If you ban a user who is already part of a channel, that user is automatically kicked from the channel.

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "CreateChannelMembership":{ + "name":"CreateChannelMembership", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/memberships", + "responseCode":201 + }, + "input":{"shape":"CreateChannelMembershipRequest"}, + "output":{"shape":"CreateChannelMembershipResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds a user to a channel. The InvitedBy response field is derived from the request header. A channel member can:

  • List messages

  • Send messages

  • Receive messages

  • Edit their own messages

  • Leave the channel

Privacy settings impact this action as follows:

  • Public Channels: You do not need to be a member to list messages, but you must be a member to send messages.

  • Private Channels: You must be a member to list or send messages.
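
Building on the operations above, here is a hedged sketch of creating a public channel and adding a member to it. None of the CreateChannel or CreateChannelMembership member names appear in this section, so the builder methods and enum values shown (privacy, clientRequestToken, ChannelMembershipType.DEFAULT, and so on) are assumptions; the ARNs are placeholders.

    import java.util.UUID;
    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.ChannelMembershipType;
    import software.amazon.awssdk.services.chime.model.ChannelPrivacy;
    import software.amazon.awssdk.services.chime.model.CreateChannelMembershipRequest;
    import software.amazon.awssdk.services.chime.model.CreateChannelRequest;

    public class ChannelMembershipExample {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();
            String userArn = "arn:aws:chime:us-east-1:123456789012:app-instance/abc/user/user-0001"; // placeholder

            String channelArn = chime.createChannel(CreateChannelRequest.builder()
                    .appInstanceArn("arn:aws:chime:us-east-1:123456789012:app-instance/abc") // placeholder
                    .name("general")
                    .privacy(ChannelPrivacy.PUBLIC) // a channel's privacy can't be changed later
                    .clientRequestToken(UUID.randomUUID().toString())
                    .build())
                    .channelArn();

            chime.createChannelMembership(CreateChannelMembershipRequest.builder()
                    .channelArn(channelArn)
                    .memberArn(userArn)
                    .type(ChannelMembershipType.DEFAULT)
                    .build());
        }
    }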

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "CreateChannelModerator":{ + "name":"CreateChannelModerator", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/moderators", + "responseCode":201 + }, + "input":{"shape":"CreateChannelModeratorRequest"}, + "output":{"shape":"CreateChannelModeratorResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a new ChannelModerator. A channel moderator can:

  • Add and remove other members of the channel.

  • Add and remove other moderators of the channel.

  • Add and remove user bans for the channel.

  • Redact messages in the channel.

  • List messages in the channel.

", + "endpoint":{"hostPrefix":"messaging-"} + }, "CreateMeeting":{ "name":"CreateMeeting", "http":{ @@ -316,6 +470,26 @@ ], "documentation":"

Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime Developer Guide. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, + "CreateMeetingDialOut":{ + "name":"CreateMeetingDialOut", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/dial-outs", + "responseCode":201 + }, + "input":{"shape":"CreateMeetingDialOutRequest"}, + "output":{"shape":"CreateMeetingDialOutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Uses the join token and call metadata in a meeting request (From number, To number, and so forth) to initiate an outbound call over the public switched telephone network (PSTN) and join the called party into the Chime meeting. Also ensures that the From number belongs to the customer.

To play welcome audio or implement an interactive voice response (IVR), use the CreateSipMediaApplicationCall API with the corresponding SIP media application ID.
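
A minimal sketch of invoking CreateMeetingDialOut through the AWS SDK for Java v2 Chime client generated from this model. The meetingId field follows the {meetingId} request URI; the fromPhoneNumber, toPhoneNumber, and joinToken fields are assumptions inferred from the operation description above.

    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.CreateMeetingDialOutRequest;
    import software.amazon.awssdk.services.chime.model.CreateMeetingDialOutResponse;

    public class MeetingDialOutSketch {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();

            // Field names other than meetingId are assumptions based on the
            // "join token and call metadata (From number, To number)" description.
            CreateMeetingDialOutResponse response = chime.createMeetingDialOut(
                    CreateMeetingDialOutRequest.builder()
                            .meetingId("00000000-0000-0000-0000-000000000000") // placeholder meeting ID
                            .fromPhoneNumber("+15550100001")                   // must belong to the customer
                            .toPhoneNumber("+15550100002")                     // PSTN number to dial out to
                            .joinToken("join-token-value")                     // placeholder join token
                            .build());
            System.out.println(response);
        }
    }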

" + }, "CreateMeetingWithAttendees":{ "name":"CreateMeetingWithAttendees", "http":{ @@ -420,6 +594,70 @@ ], "documentation":"

Adds a member to a chat room in an Amazon Chime Enterprise account. A member can be either a user or a bot. The member role designates whether the member is a chat room administrator or a general chat room member.

" }, + "CreateSipMediaApplication":{ + "name":"CreateSipMediaApplication", + "http":{ + "method":"POST", + "requestUri":"/sip-media-applications", + "responseCode":201 + }, + "input":{"shape":"CreateSipMediaApplicationRequest"}, + "output":{"shape":"CreateSipMediaApplicationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a SIP media application.

" + }, + "CreateSipMediaApplicationCall":{ + "name":"CreateSipMediaApplicationCall", + "http":{ + "method":"POST", + "requestUri":"/sip-media-applications/{sipMediaApplicationId}/calls", + "responseCode":201 + }, + "input":{"shape":"CreateSipMediaApplicationCallRequest"}, + "output":{"shape":"CreateSipMediaApplicationCallResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates an outbound call to a destination phone number from the phone number specified in the request, and invokes the endpoint of the specified sipMediaApplicationId.
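
A minimal sketch of invoking CreateSipMediaApplicationCall with the AWS SDK for Java v2 Chime client generated from this model. The sipMediaApplicationId field follows the {sipMediaApplicationId} request URI; the fromPhoneNumber and toPhoneNumber fields are assumptions based on the operation description above.

    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.CreateSipMediaApplicationCallRequest;
    import software.amazon.awssdk.services.chime.model.CreateSipMediaApplicationCallResponse;

    public class SipMediaApplicationCallSketch {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();

            // The phone-number fields below are assumed names; the application ID
            // corresponds to the {sipMediaApplicationId} path parameter.
            CreateSipMediaApplicationCallResponse response = chime.createSipMediaApplicationCall(
                    CreateSipMediaApplicationCallRequest.builder()
                            .sipMediaApplicationId("sip-media-application-id") // placeholder ID
                            .fromPhoneNumber("+15550100001")                   // calling number
                            .toPhoneNumber("+15550100002")                     // called number
                            .build());
            System.out.println(response);
        }
    }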

" + }, + "CreateSipRule":{ + "name":"CreateSipRule", + "http":{ + "method":"POST", + "requestUri":"/sip-rules", + "responseCode":201 + }, + "input":{"shape":"CreateSipRuleRequest"}, + "output":{"shape":"CreateSipRuleResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a SIP rule which can be used to run a SIP media application as a target for a specific trigger type.

" + }, "CreateUser":{ "name":"CreateUser", "http":{ @@ -504,205 +742,243 @@ ], "documentation":"

Deletes the specified Amazon Chime account. You must suspend all users before deleting a Team account. You can use the BatchSuspendUser action to do so.

For EnterpriseLWA and EnterpriseAD accounts, you must release the claimed domains for your Amazon Chime account before deletion. As soon as you release the domain, all users under that account are suspended.

Deleted accounts appear in your Disabled accounts list for 90 days. To restore a deleted account from your Disabled accounts list, you must contact AWS Support.

After 90 days, deleted accounts are permanently removed from your Disabled accounts list.

" }, - "DeleteAttendee":{ - "name":"DeleteAttendee", + "DeleteAppInstance":{ + "name":"DeleteAppInstance", "http":{ "method":"DELETE", - "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}", + "requestUri":"/app-instances/{appInstanceArn}", "responseCode":204 }, - "input":{"shape":"DeleteAttendeeRequest"}, + "input":{"shape":"DeleteAppInstanceRequest"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, - {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes an attendee from the specified Amazon Chime SDK meeting and deletes their JoinToken. Attendees are automatically deleted when an Amazon Chime SDK meeting is deleted. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Deletes an AppInstance and all associated data asynchronously.

", + "endpoint":{"hostPrefix":"identity-"} }, - "DeleteEventsConfiguration":{ - "name":"DeleteEventsConfiguration", + "DeleteAppInstanceAdmin":{ + "name":"DeleteAppInstanceAdmin", "http":{ "method":"DELETE", - "requestUri":"/accounts/{accountId}/bots/{botId}/events-configuration", + "requestUri":"/app-instances/{appInstanceArn}/admins/{appInstanceAdminArn}", "responseCode":204 }, - "input":{"shape":"DeleteEventsConfigurationRequest"}, + "input":{"shape":"DeleteAppInstanceAdminRequest"}, "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, {"shape":"UnauthorizedClientException"}, - {"shape":"ResourceLimitExceededException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the events configuration that allows a bot to receive outgoing events.

" + "documentation":"

Demotes an AppInstanceAdmin to an AppInstanceUser. This action does not delete the user.

", + "endpoint":{"hostPrefix":"identity-"} }, - "DeleteMeeting":{ - "name":"DeleteMeeting", + "DeleteAppInstanceStreamingConfigurations":{ + "name":"DeleteAppInstanceStreamingConfigurations", "http":{ "method":"DELETE", - "requestUri":"/meetings/{meetingId}", + "requestUri":"/app-instances/{appInstanceArn}/streaming-configurations", "responseCode":204 }, - "input":{"shape":"DeleteMeetingRequest"}, + "input":{"shape":"DeleteAppInstanceStreamingConfigurationsRequest"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"ThrottledClientException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified Amazon Chime SDK meeting. When a meeting is deleted, its attendees are also deleted and clients can no longer join it. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Deletes the streaming configurations of an app instance.

" }, - "DeletePhoneNumber":{ - "name":"DeletePhoneNumber", + "DeleteAppInstanceUser":{ + "name":"DeleteAppInstanceUser", "http":{ "method":"DELETE", - "requestUri":"/phone-numbers/{phoneNumberId}", + "requestUri":"/app-instance-users/{appInstanceUserArn}", "responseCode":204 }, - "input":{"shape":"DeletePhoneNumberRequest"}, + "input":{"shape":"DeleteAppInstanceUserRequest"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Moves the specified phone number into the Deletion queue. A phone number must be disassociated from any users or Amazon Chime Voice Connectors before it can be deleted.

Deleted phone numbers remain in the Deletion queue for 7 days before they are deleted permanently.

" + "documentation":"

Deletes an AppInstanceUser.

", + "endpoint":{"hostPrefix":"identity-"} }, - "DeleteProxySession":{ - "name":"DeleteProxySession", + "DeleteAttendee":{ + "name":"DeleteAttendee", "http":{ "method":"DELETE", - "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}", "responseCode":204 }, - "input":{"shape":"DeleteProxySessionRequest"}, + "input":{"shape":"DeleteAttendeeRequest"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified proxy session from the specified Amazon Chime Voice Connector.

" + "documentation":"

Deletes an attendee from the specified Amazon Chime SDK meeting and deletes their JoinToken. Attendees are automatically deleted when an Amazon Chime SDK meeting is deleted. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, - "DeleteRoom":{ - "name":"DeleteRoom", + "DeleteChannel":{ + "name":"DeleteChannel", "http":{ "method":"DELETE", - "requestUri":"/accounts/{accountId}/rooms/{roomId}", + "requestUri":"/channels/{channelArn}", "responseCode":204 }, - "input":{"shape":"DeleteRoomRequest"}, + "input":{"shape":"DeleteChannelRequest"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes a chat room in an Amazon Chime Enterprise account.

" + "documentation":"

Immediately makes a channel and its memberships inaccessible and marks them for deletion. This is an irreversible process.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "DeleteRoomMembership":{ - "name":"DeleteRoomMembership", + "DeleteChannelBan":{ + "name":"DeleteChannelBan", "http":{ "method":"DELETE", - "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + "requestUri":"/channels/{channelArn}/bans/{memberArn}", "responseCode":204 }, - "input":{"shape":"DeleteRoomMembershipRequest"}, + "input":{"shape":"DeleteChannelBanRequest"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes a member from a chat room in an Amazon Chime Enterprise account.

" + "documentation":"

Removes a user from a channel's ban list.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "DeleteVoiceConnector":{ - "name":"DeleteVoiceConnector", + "DeleteChannelMembership":{ + "name":"DeleteChannelMembership", "http":{ "method":"DELETE", - "requestUri":"/voice-connectors/{voiceConnectorId}", + "requestUri":"/channels/{channelArn}/memberships/{memberArn}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorRequest"}, + "input":{"shape":"DeleteChannelMembershipRequest"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, - {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified Amazon Chime Voice Connector. Any phone numbers associated with the Amazon Chime Voice Connector must be disassociated from it before it can be deleted.

" + "documentation":"

Removes a member from a channel.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "DeleteVoiceConnectorEmergencyCallingConfiguration":{ - "name":"DeleteVoiceConnectorEmergencyCallingConfiguration", + "DeleteChannelMessage":{ + "name":"DeleteChannelMessage", "http":{ "method":"DELETE", - "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "requestUri":"/channels/{channelArn}/messages/{messageId}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorEmergencyCallingConfigurationRequest"}, + "input":{"shape":"DeleteChannelMessageRequest"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the emergency calling configuration details from the specified Amazon Chime Voice Connector.

" + "documentation":"

Deletes a channel message. Only admins can perform this action. Deletion makes messages inaccessible immediately. A background process deletes any revisions created by UpdateChannelMessage.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "DeleteVoiceConnectorGroup":{ - "name":"DeleteVoiceConnectorGroup", + "DeleteChannelModerator":{ + "name":"DeleteChannelModerator", "http":{ "method":"DELETE", - "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}", + "requestUri":"/channels/{channelArn}/moderators/{channelModeratorArn}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorGroupRequest"}, + "input":{"shape":"DeleteChannelModeratorRequest"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes a channel moderator.

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "DeleteEventsConfiguration":{ + "name":"DeleteEventsConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{accountId}/bots/{botId}/events-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteEventsConfigurationRequest"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, - {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ResourceLimitExceededException"} + ], + "documentation":"

Deletes the events configuration that allows a bot to receive outgoing events.

" + }, + "DeleteMeeting":{ + "name":"DeleteMeeting", + "http":{ + "method":"DELETE", + "requestUri":"/meetings/{meetingId}", + "responseCode":204 + }, + "input":{"shape":"DeleteMeetingRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified Amazon Chime Voice Connector group. Any VoiceConnectorItems and phone numbers associated with the group must be removed before it can be deleted.

" + "documentation":"

Deletes the specified Amazon Chime SDK meeting. When a meeting is deleted, its attendees are also deleted and clients can no longer join it. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, - "DeleteVoiceConnectorOrigination":{ - "name":"DeleteVoiceConnectorOrigination", + "DeletePhoneNumber":{ + "name":"DeletePhoneNumber", "http":{ "method":"DELETE", - "requestUri":"/voice-connectors/{voiceConnectorId}/origination", + "requestUri":"/phone-numbers/{phoneNumberId}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorOriginationRequest"}, + "input":{"shape":"DeletePhoneNumberRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -712,16 +988,16 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the origination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, the emergency calling configuration must be deleted prior to deleting the origination settings.

" + "documentation":"

Moves the specified phone number into the Deletion queue. A phone number must be disassociated from any users or Amazon Chime Voice Connectors before it can be deleted.

Deleted phone numbers remain in the Deletion queue for 7 days before they are deleted permanently.

" }, - "DeleteVoiceConnectorProxy":{ - "name":"DeleteVoiceConnectorProxy", + "DeleteProxySession":{ + "name":"DeleteProxySession", "http":{ "method":"DELETE", - "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorProxyRequest"}, + "input":{"shape":"DeleteProxySessionRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -731,94 +1007,114 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the proxy configuration from the specified Amazon Chime Voice Connector.

" + "documentation":"

Deletes the specified proxy session from the specified Amazon Chime Voice Connector.

" }, - "DeleteVoiceConnectorStreamingConfiguration":{ - "name":"DeleteVoiceConnectorStreamingConfiguration", + "DeleteRoom":{ + "name":"DeleteRoom", "http":{ "method":"DELETE", - "requestUri":"/voice-connectors/{voiceConnectorId}/streaming-configuration", + "requestUri":"/accounts/{accountId}/rooms/{roomId}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorStreamingConfigurationRequest"}, + "input":{"shape":"DeleteRoomRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes a chat room in an Amazon Chime Enterprise account.

" + }, + "DeleteRoomMembership":{ + "name":"DeleteRoomMembership", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + "responseCode":204 + }, + "input":{"shape":"DeleteRoomMembershipRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the streaming configuration for the specified Amazon Chime Voice Connector.

" + "documentation":"

Removes a member from a chat room in an Amazon Chime Enterprise account.

" }, - "DeleteVoiceConnectorTermination":{ - "name":"DeleteVoiceConnectorTermination", + "DeleteSipMediaApplication":{ + "name":"DeleteSipMediaApplication", "http":{ "method":"DELETE", - "requestUri":"/voice-connectors/{voiceConnectorId}/termination", + "requestUri":"/sip-media-applications/{sipMediaApplicationId}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorTerminationRequest"}, + "input":{"shape":"DeleteSipMediaApplicationRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the termination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, the emergency calling configuration must be deleted prior to deleting the termination settings.

" + "documentation":"

Deletes a SIP media application.

" }, - "DeleteVoiceConnectorTerminationCredentials":{ - "name":"DeleteVoiceConnectorTerminationCredentials", + "DeleteSipRule":{ + "name":"DeleteSipRule", "http":{ - "method":"POST", - "requestUri":"/voice-connectors/{voiceConnectorId}/termination/credentials?operation=delete", + "method":"DELETE", + "requestUri":"/sip-rules/{sipRuleId}", "responseCode":204 }, - "input":{"shape":"DeleteVoiceConnectorTerminationCredentialsRequest"}, + "input":{"shape":"DeleteSipRuleRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified SIP credentials used by your equipment to authenticate during call termination.

" + "documentation":"

Deletes a SIP rule. You must disable a SIP rule before you can delete it.

" }, - "DisassociatePhoneNumberFromUser":{ - "name":"DisassociatePhoneNumberFromUser", + "DeleteVoiceConnector":{ + "name":"DeleteVoiceConnector", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/users/{userId}?operation=disassociate-phone-number", - "responseCode":200 + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}", + "responseCode":204 }, - "input":{"shape":"DisassociatePhoneNumberFromUserRequest"}, - "output":{"shape":"DisassociatePhoneNumberFromUserResponse"}, + "input":{"shape":"DeleteVoiceConnectorRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Disassociates the primary provisioned phone number from the specified Amazon Chime user.

" + "documentation":"

Deletes the specified Amazon Chime Voice Connector. Any phone numbers associated with the Amazon Chime Voice Connector must be disassociated from it before it can be deleted.

" }, - "DisassociatePhoneNumbersFromVoiceConnector":{ - "name":"DisassociatePhoneNumbersFromVoiceConnector", + "DeleteVoiceConnectorEmergencyCallingConfiguration":{ + "name":"DeleteVoiceConnectorEmergencyCallingConfiguration", "http":{ - "method":"POST", - "requestUri":"/voice-connectors/{voiceConnectorId}?operation=disassociate-phone-numbers", - "responseCode":200 + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "responseCode":204 }, - "input":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorRequest"}, - "output":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorResponse"}, + "input":{"shape":"DeleteVoiceConnectorEmergencyCallingConfigurationRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -828,37 +1124,36 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector.

" + "documentation":"

Deletes the emergency calling configuration details from the specified Amazon Chime Voice Connector.

" }, - "DisassociatePhoneNumbersFromVoiceConnectorGroup":{ - "name":"DisassociatePhoneNumbersFromVoiceConnectorGroup", + "DeleteVoiceConnectorGroup":{ + "name":"DeleteVoiceConnectorGroup", "http":{ - "method":"POST", - "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}?operation=disassociate-phone-numbers", - "responseCode":200 + "method":"DELETE", + "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}", + "responseCode":204 }, - "input":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorGroupRequest"}, - "output":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorGroupResponse"}, + "input":{"shape":"DeleteVoiceConnectorGroupRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector group.

" + "documentation":"

Deletes the specified Amazon Chime Voice Connector group. Any VoiceConnectorItems and phone numbers associated with the group must be removed before it can be deleted.

" }, - "DisassociateSigninDelegateGroupsFromAccount":{ - "name":"DisassociateSigninDelegateGroupsFromAccount", + "DeleteVoiceConnectorOrigination":{ + "name":"DeleteVoiceConnectorOrigination", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}?operation=disassociate-signin-delegate-groups", - "responseCode":200 + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/origination", + "responseCode":204 }, - "input":{"shape":"DisassociateSigninDelegateGroupsFromAccountRequest"}, - "output":{"shape":"DisassociateSigninDelegateGroupsFromAccountResponse"}, + "input":{"shape":"DeleteVoiceConnectorOriginationRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -868,16 +1163,16 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Disassociates the specified sign-in delegate groups from the specified Amazon Chime account.

" + "documentation":"

Deletes the origination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, the emergency calling configuration must be deleted prior to deleting the origination settings.

" }, - "GetAccount":{ - "name":"GetAccount", + "DeleteVoiceConnectorProxy":{ + "name":"DeleteVoiceConnectorProxy", "http":{ - "method":"GET", - "requestUri":"/accounts/{accountId}" + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy", + "responseCode":204 }, - "input":{"shape":"GetAccountRequest"}, - "output":{"shape":"GetAccountResponse"}, + "input":{"shape":"DeleteVoiceConnectorProxyRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -887,230 +1182,236 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves details for the specified Amazon Chime account, such as account type and supported licenses.

" + "documentation":"

Deletes the proxy configuration from the specified Amazon Chime Voice Connector.

" }, - "GetAccountSettings":{ - "name":"GetAccountSettings", + "DeleteVoiceConnectorStreamingConfiguration":{ + "name":"DeleteVoiceConnectorStreamingConfiguration", "http":{ - "method":"GET", - "requestUri":"/accounts/{accountId}/settings" + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/streaming-configuration", + "responseCode":204 }, - "input":{"shape":"GetAccountSettingsRequest"}, - "output":{"shape":"GetAccountSettingsResponse"}, + "input":{"shape":"DeleteVoiceConnectorStreamingConfigurationRequest"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves account settings for the specified Amazon Chime account ID, such as remote control and dial out settings. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + "documentation":"

Deletes the streaming configuration for the specified Amazon Chime Voice Connector.

" }, - "GetAttendee":{ - "name":"GetAttendee", + "DeleteVoiceConnectorTermination":{ + "name":"DeleteVoiceConnectorTermination", "http":{ - "method":"GET", - "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}", - "responseCode":200 + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/termination", + "responseCode":204 }, - "input":{"shape":"GetAttendeeRequest"}, - "output":{"shape":"GetAttendeeResponse"}, + "input":{"shape":"DeleteVoiceConnectorTerminationRequest"}, "errors":[ - {"shape":"BadRequestException"}, - {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, - {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Gets the Amazon Chime SDK attendee details for a specified meeting ID and attendee ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Deletes the termination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, the emergency calling configuration must be deleted prior to deleting the termination settings.

" }, - "GetBot":{ - "name":"GetBot", + "DeleteVoiceConnectorTerminationCredentials":{ + "name":"DeleteVoiceConnectorTerminationCredentials", "http":{ - "method":"GET", - "requestUri":"/accounts/{accountId}/bots/{botId}", - "responseCode":200 + "method":"POST", + "requestUri":"/voice-connectors/{voiceConnectorId}/termination/credentials?operation=delete", + "responseCode":204 }, - "input":{"shape":"GetBotRequest"}, - "output":{"shape":"GetBotResponse"}, + "input":{"shape":"DeleteVoiceConnectorTerminationCredentialsRequest"}, "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"}, - {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, - {"shape":"ThrottledClientException"} + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves details for the specified bot, such as bot email address, bot type, status, and display name.

" + "documentation":"

Deletes the specified SIP credentials used by your equipment to authenticate during call termination.

" }, - "GetEventsConfiguration":{ - "name":"GetEventsConfiguration", + "DescribeAppInstance":{ + "name":"DescribeAppInstance", "http":{ "method":"GET", - "requestUri":"/accounts/{accountId}/bots/{botId}/events-configuration", + "requestUri":"/app-instances/{appInstanceArn}", "responseCode":200 }, - "input":{"shape":"GetEventsConfigurationRequest"}, - "output":{"shape":"GetEventsConfigurationResponse"}, + "input":{"shape":"DescribeAppInstanceRequest"}, + "output":{"shape":"DescribeAppInstanceResponse"}, "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, {"shape":"UnauthorizedClientException"}, - {"shape":"ResourceLimitExceededException"}, - {"shape":"NotFoundException"} - ], - "documentation":"

Gets details for an events configuration that allows a bot to receive outgoing events, such as an HTTPS endpoint or Lambda function ARN.

" + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Returns the full details of an AppInstance.

", + "endpoint":{"hostPrefix":"identity-"} }, - "GetGlobalSettings":{ - "name":"GetGlobalSettings", + "DescribeAppInstanceAdmin":{ + "name":"DescribeAppInstanceAdmin", "http":{ "method":"GET", - "requestUri":"/settings", + "requestUri":"/app-instances/{appInstanceArn}/admins/{appInstanceAdminArn}", "responseCode":200 }, - "output":{"shape":"GetGlobalSettingsResponse"}, + "input":{"shape":"DescribeAppInstanceAdminRequest"}, + "output":{"shape":"DescribeAppInstanceAdminResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.

" + "documentation":"

Returns the full details of an AppInstanceAdmin.

", + "endpoint":{"hostPrefix":"identity-"} }, - "GetMeeting":{ - "name":"GetMeeting", + "DescribeAppInstanceUser":{ + "name":"DescribeAppInstanceUser", "http":{ "method":"GET", - "requestUri":"/meetings/{meetingId}", + "requestUri":"/app-instance-users/{appInstanceUserArn}", "responseCode":200 }, - "input":{"shape":"GetMeetingRequest"}, - "output":{"shape":"GetMeetingResponse"}, + "input":{"shape":"DescribeAppInstanceUserRequest"}, + "output":{"shape":"DescribeAppInstanceUserResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, {"shape":"ThrottledClientException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Gets the Amazon Chime SDK meeting details for the specified meeting ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Returns the full details of an AppInstanceUser.

", + "endpoint":{"hostPrefix":"identity-"} }, - "GetPhoneNumber":{ - "name":"GetPhoneNumber", + "DescribeChannel":{ + "name":"DescribeChannel", "http":{ "method":"GET", - "requestUri":"/phone-numbers/{phoneNumberId}" + "requestUri":"/channels/{channelArn}", + "responseCode":200 }, - "input":{"shape":"GetPhoneNumberRequest"}, - "output":{"shape":"GetPhoneNumberResponse"}, + "input":{"shape":"DescribeChannelRequest"}, + "output":{"shape":"DescribeChannelResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves details for the specified phone number ID, such as associations, capabilities, and product type.

" + "documentation":"

Returns the full details of a channel in an Amazon Chime app instance.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "GetPhoneNumberOrder":{ - "name":"GetPhoneNumberOrder", + "DescribeChannelBan":{ + "name":"DescribeChannelBan", "http":{ "method":"GET", - "requestUri":"/phone-number-orders/{phoneNumberOrderId}", + "requestUri":"/channels/{channelArn}/bans/{memberArn}", "responseCode":200 }, - "input":{"shape":"GetPhoneNumberOrderRequest"}, - "output":{"shape":"GetPhoneNumberOrderResponse"}, + "input":{"shape":"DescribeChannelBanRequest"}, + "output":{"shape":"DescribeChannelBanResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves details for the specified phone number order, such as order creation timestamp, phone numbers in E.164 format, product type, and order status.

" + "documentation":"

Returns the full details of a channel ban.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "GetPhoneNumberSettings":{ - "name":"GetPhoneNumberSettings", + "DescribeChannelMembership":{ + "name":"DescribeChannelMembership", "http":{ "method":"GET", - "requestUri":"/settings/phone-number", + "requestUri":"/channels/{channelArn}/memberships/{memberArn}", "responseCode":200 }, - "output":{"shape":"GetPhoneNumberSettingsResponse"}, + "input":{"shape":"DescribeChannelMembershipRequest"}, + "output":{"shape":"DescribeChannelMembershipResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the phone number settings for the administrator's AWS account, such as the default outbound calling name.

" + "documentation":"

Returns the full details of a user's channel membership.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "GetProxySession":{ - "name":"GetProxySession", + "DescribeChannelMembershipForAppInstanceUser":{ + "name":"DescribeChannelMembershipForAppInstanceUser", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", + "requestUri":"/channels/{channelArn}?scope=app-instance-user-membership", "responseCode":200 }, - "input":{"shape":"GetProxySessionRequest"}, - "output":{"shape":"GetProxySessionResponse"}, + "input":{"shape":"DescribeChannelMembershipForAppInstanceUserRequest"}, + "output":{"shape":"DescribeChannelMembershipForAppInstanceUserResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Gets the specified proxy session details for the specified Amazon Chime Voice Connector.

" + "documentation":"

Returns the details of a channel based on the membership of the specified AppInstanceUser.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "GetRetentionSettings":{ - "name":"GetRetentionSettings", + "DescribeChannelModeratedByAppInstanceUser":{ + "name":"DescribeChannelModeratedByAppInstanceUser", "http":{ "method":"GET", - "requestUri":"/accounts/{accountId}/retention-settings" + "requestUri":"/channels/{channelArn}?scope=app-instance-user-moderated-channel", + "responseCode":200 }, - "input":{"shape":"GetRetentionSettingsRequest"}, - "output":{"shape":"GetRetentionSettingsResponse"}, + "input":{"shape":"DescribeChannelModeratedByAppInstanceUserRequest"}, + "output":{"shape":"DescribeChannelModeratedByAppInstanceUserResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Gets the retention settings for the specified Amazon Chime Enterprise account. For more information about retention settings, see Managing Chat Retention Policies in the Amazon Chime Administration Guide.

" + "documentation":"

Returns the full details of a channel moderated by the specified AppInstanceUser.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "GetRoom":{ - "name":"GetRoom", + "DescribeChannelModerator":{ + "name":"DescribeChannelModerator", "http":{ "method":"GET", - "requestUri":"/accounts/{accountId}/rooms/{roomId}", + "requestUri":"/channels/{channelArn}/moderators/{channelModeratorArn}", "responseCode":200 }, - "input":{"shape":"GetRoomRequest"}, - "output":{"shape":"GetRoomResponse"}, + "input":{"shape":"DescribeChannelModeratorRequest"}, + "output":{"shape":"DescribeChannelModeratorResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, @@ -1120,17 +1421,18 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves room details, such as the room name, for a room in an Amazon Chime Enterprise account.

" + "documentation":"

Returns the full details of a single ChannelModerator.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "GetUser":{ - "name":"GetUser", + "DisassociatePhoneNumberFromUser":{ + "name":"DisassociatePhoneNumberFromUser", "http":{ - "method":"GET", - "requestUri":"/accounts/{accountId}/users/{userId}", + "method":"POST", + "requestUri":"/accounts/{accountId}/users/{userId}?operation=disassociate-phone-number", "responseCode":200 }, - "input":{"shape":"GetUserRequest"}, - "output":{"shape":"GetUserResponse"}, + "input":{"shape":"DisassociatePhoneNumberFromUserRequest"}, + "output":{"shape":"DisassociatePhoneNumberFromUserResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1140,17 +1442,17 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves details for the specified user ID, such as primary email address, license type, and personal meeting PIN.

To retrieve user details with an email address instead of a user ID, use the ListUsers action, and then filter by email address.

" + "documentation":"

Disassociates the primary provisioned phone number from the specified Amazon Chime user.

" }, - "GetUserSettings":{ - "name":"GetUserSettings", + "DisassociatePhoneNumbersFromVoiceConnector":{ + "name":"DisassociatePhoneNumbersFromVoiceConnector", "http":{ - "method":"GET", - "requestUri":"/accounts/{accountId}/users/{userId}/settings", + "method":"POST", + "requestUri":"/voice-connectors/{voiceConnectorId}?operation=disassociate-phone-numbers", "responseCode":200 }, - "input":{"shape":"GetUserSettingsRequest"}, - "output":{"shape":"GetUserSettingsResponse"}, + "input":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorRequest"}, + "output":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1160,17 +1462,17 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves settings for the specified user ID, such as any associated phone number settings.

" + "documentation":"

Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector.

" }, - "GetVoiceConnector":{ - "name":"GetVoiceConnector", + "DisassociatePhoneNumbersFromVoiceConnectorGroup":{ + "name":"DisassociatePhoneNumbersFromVoiceConnectorGroup", "http":{ - "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}", + "method":"POST", + "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}?operation=disassociate-phone-numbers", "responseCode":200 }, - "input":{"shape":"GetVoiceConnectorRequest"}, - "output":{"shape":"GetVoiceConnectorResponse"}, + "input":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorGroupRequest"}, + "output":{"shape":"DisassociatePhoneNumbersFromVoiceConnectorGroupResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1180,17 +1482,17 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves details for the specified Amazon Chime Voice Connector, such as timestamps, name, outbound host, and encryption requirements.

" + "documentation":"

Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector group.

" }, - "GetVoiceConnectorEmergencyCallingConfiguration":{ - "name":"GetVoiceConnectorEmergencyCallingConfiguration", + "DisassociateSigninDelegateGroupsFromAccount":{ + "name":"DisassociateSigninDelegateGroupsFromAccount", "http":{ - "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "method":"POST", + "requestUri":"/accounts/{accountId}?operation=disassociate-signin-delegate-groups", "responseCode":200 }, - "input":{"shape":"GetVoiceConnectorEmergencyCallingConfigurationRequest"}, - "output":{"shape":"GetVoiceConnectorEmergencyCallingConfigurationResponse"}, + "input":{"shape":"DisassociateSigninDelegateGroupsFromAccountRequest"}, + "output":{"shape":"DisassociateSigninDelegateGroupsFromAccountResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1200,17 +1502,16 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Gets the emergency calling configuration details for the specified Amazon Chime Voice Connector.

" + "documentation":"

Disassociates the specified sign-in delegate groups from the specified Amazon Chime account.

" }, - "GetVoiceConnectorGroup":{ - "name":"GetVoiceConnectorGroup", + "GetAccount":{ + "name":"GetAccount", "http":{ "method":"GET", - "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}", - "responseCode":200 + "requestUri":"/accounts/{accountId}" }, - "input":{"shape":"GetVoiceConnectorGroupRequest"}, - "output":{"shape":"GetVoiceConnectorGroupResponse"}, + "input":{"shape":"GetAccountRequest"}, + "output":{"shape":"GetAccountResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1220,176 +1521,176 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves details for the specified Amazon Chime Voice Connector group, such as timestamps, name, and associated VoiceConnectorItems.

" + "documentation":"

Retrieves details for the specified Amazon Chime account, such as account type and supported licenses.

" }, - "GetVoiceConnectorLoggingConfiguration":{ - "name":"GetVoiceConnectorLoggingConfiguration", + "GetAccountSettings":{ + "name":"GetAccountSettings", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/logging-configuration", - "responseCode":200 + "requestUri":"/accounts/{accountId}/settings" }, - "input":{"shape":"GetVoiceConnectorLoggingConfigurationRequest"}, - "output":{"shape":"GetVoiceConnectorLoggingConfigurationResponse"}, + "input":{"shape":"GetAccountSettingsRequest"}, + "output":{"shape":"GetAccountSettingsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the logging configuration details for the specified Amazon Chime Voice Connector. Shows whether SIP message logs are enabled for sending to Amazon CloudWatch Logs.

" + "documentation":"

Retrieves account settings for the specified Amazon Chime account ID, such as remote control and dial out settings. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" }, - "GetVoiceConnectorOrigination":{ - "name":"GetVoiceConnectorOrigination", + "GetAppInstanceRetentionSettings":{ + "name":"GetAppInstanceRetentionSettings", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/origination", + "requestUri":"/app-instances/{appInstanceArn}/retention-settings", "responseCode":200 }, - "input":{"shape":"GetVoiceConnectorOriginationRequest"}, - "output":{"shape":"GetVoiceConnectorOriginationResponse"}, + "input":{"shape":"GetAppInstanceRetentionSettingsRequest"}, + "output":{"shape":"GetAppInstanceRetentionSettingsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves origination setting details for the specified Amazon Chime Voice Connector.

" + "documentation":"

Gets the retention settings for an app instance.

", + "endpoint":{"hostPrefix":"identity-"} }, - "GetVoiceConnectorProxy":{ - "name":"GetVoiceConnectorProxy", + "GetAppInstanceStreamingConfigurations":{ + "name":"GetAppInstanceStreamingConfigurations", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy", + "requestUri":"/app-instances/{appInstanceArn}/streaming-configurations", "responseCode":200 }, - "input":{"shape":"GetVoiceConnectorProxyRequest"}, - "output":{"shape":"GetVoiceConnectorProxyResponse"}, + "input":{"shape":"GetAppInstanceStreamingConfigurationsRequest"}, + "output":{"shape":"GetAppInstanceStreamingConfigurationsResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Gets the proxy configuration details for the specified Amazon Chime Voice Connector.

" + "documentation":"

Gets the streaming settings for an app instance.

" }, - "GetVoiceConnectorStreamingConfiguration":{ - "name":"GetVoiceConnectorStreamingConfiguration", + "GetAttendee":{ + "name":"GetAttendee", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/streaming-configuration", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}", "responseCode":200 }, - "input":{"shape":"GetVoiceConnectorStreamingConfigurationRequest"}, - "output":{"shape":"GetVoiceConnectorStreamingConfigurationResponse"}, + "input":{"shape":"GetAttendeeRequest"}, + "output":{"shape":"GetAttendeeResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis. It also shows the retention period, in hours, for the Amazon Kinesis data.

" + "documentation":"

Gets the Amazon Chime SDK attendee details for a specified meeting ID and attendee ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, - "GetVoiceConnectorTermination":{ - "name":"GetVoiceConnectorTermination", + "GetBot":{ + "name":"GetBot", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/termination", + "requestUri":"/accounts/{accountId}/bots/{botId}", "responseCode":200 }, - "input":{"shape":"GetVoiceConnectorTerminationRequest"}, - "output":{"shape":"GetVoiceConnectorTerminationResponse"}, + "input":{"shape":"GetBotRequest"}, + "output":{"shape":"GetBotResponse"}, "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, - {"shape":"ThrottledClientException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"} + {"shape":"ThrottledClientException"} ], - "documentation":"

Retrieves termination setting details for the specified Amazon Chime Voice Connector.

" + "documentation":"

Retrieves details for the specified bot, such as bot email address, bot type, status, and display name.

" }, - "GetVoiceConnectorTerminationHealth":{ - "name":"GetVoiceConnectorTerminationHealth", + "GetChannelMessage":{ + "name":"GetChannelMessage", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/termination/health", + "requestUri":"/channels/{channelArn}/messages/{messageId}", "responseCode":200 }, - "input":{"shape":"GetVoiceConnectorTerminationHealthRequest"}, - "output":{"shape":"GetVoiceConnectorTerminationHealthResponse"}, + "input":{"shape":"GetChannelMessageRequest"}, + "output":{"shape":"GetChannelMessageResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about the last time a SIP OPTIONS ping was received from your SIP infrastructure for the specified Amazon Chime Voice Connector.

" + "documentation":"

Gets the full details of a channel message.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "InviteUsers":{ - "name":"InviteUsers", + "GetEventsConfiguration":{ + "name":"GetEventsConfiguration", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/users?operation=add", - "responseCode":201 + "method":"GET", + "requestUri":"/accounts/{accountId}/bots/{botId}/events-configuration", + "responseCode":200 }, - "input":{"shape":"InviteUsersRequest"}, - "output":{"shape":"InviteUsersResponse"}, + "input":{"shape":"GetEventsConfigurationRequest"}, + "output":{"shape":"GetEventsConfigurationResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, - {"shape":"ThrottledClientException"}, - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"} - ], - "documentation":"

Sends email to a maximum of 50 users, inviting them to the specified Amazon Chime Team account. Only Team account types are currently supported for this action.

" + {"shape":"UnauthorizedClientException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Gets details for an events configuration that allows a bot to receive outgoing events, such as an HTTPS endpoint or Lambda function ARN.

" }, - "ListAccounts":{ - "name":"ListAccounts", + "GetGlobalSettings":{ + "name":"GetGlobalSettings", "http":{ "method":"GET", - "requestUri":"/accounts" + "requestUri":"/settings", + "responseCode":200 }, - "input":{"shape":"ListAccountsRequest"}, - "output":{"shape":"ListAccountsResponse"}, + "output":{"shape":"GetGlobalSettingsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the Amazon Chime accounts under the administrator's AWS account. You can filter accounts by account name prefix. To find out which Amazon Chime account a user belongs to, you can filter by the user's email address, which returns one account result.

" + "documentation":"

Retrieves global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.

" }, - "ListAttendeeTags":{ - "name":"ListAttendeeTags", + "GetMeeting":{ + "name":"GetMeeting", "http":{ "method":"GET", - "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags", + "requestUri":"/meetings/{meetingId}", "responseCode":200 }, - "input":{"shape":"ListAttendeeTagsRequest"}, - "output":{"shape":"ListAttendeeTagsResponse"}, + "input":{"shape":"GetMeetingRequest"}, + "output":{"shape":"GetMeetingResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, @@ -1399,133 +1700,152 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the tags applied to an Amazon Chime SDK attendee resource.

" + "documentation":"

Gets the Amazon Chime SDK meeting details for the specified meeting ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, - "ListAttendees":{ - "name":"ListAttendees", + "GetMessagingSessionEndpoint":{ + "name":"GetMessagingSessionEndpoint", "http":{ "method":"GET", - "requestUri":"/meetings/{meetingId}/attendees", + "requestUri":"/endpoints/messaging-session", "responseCode":200 }, - "input":{"shape":"ListAttendeesRequest"}, - "output":{"shape":"ListAttendeesResponse"}, + "input":{"shape":"GetMessagingSessionEndpointRequest"}, + "output":{"shape":"GetMessagingSessionEndpointResponse"}, "errors":[ - {"shape":"BadRequestException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, {"shape":"ThrottledClientException"}, - {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the attendees for the specified Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Gets the endpoint for the messaging session.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "ListBots":{ - "name":"ListBots", + "GetPhoneNumber":{ + "name":"GetPhoneNumber", "http":{ "method":"GET", - "requestUri":"/accounts/{accountId}/bots", - "responseCode":200 + "requestUri":"/phone-numbers/{phoneNumberId}" }, - "input":{"shape":"ListBotsRequest"}, - "output":{"shape":"ListBotsResponse"}, + "input":{"shape":"GetPhoneNumberRequest"}, + "output":{"shape":"GetPhoneNumberResponse"}, "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"}, - {"shape":"ForbiddenException"}, {"shape":"UnauthorizedClientException"}, - {"shape":"BadRequestException"}, {"shape":"NotFoundException"}, - {"shape":"ThrottledClientException"} + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the bots associated with the administrator's Amazon Chime Enterprise account ID.

" + "documentation":"

Retrieves details for the specified phone number ID, such as associations, capabilities, and product type.

" }, - "ListMeetingTags":{ - "name":"ListMeetingTags", + "GetPhoneNumberOrder":{ + "name":"GetPhoneNumberOrder", "http":{ "method":"GET", - "requestUri":"/meetings/{meetingId}/tags", + "requestUri":"/phone-number-orders/{phoneNumberOrderId}", "responseCode":200 }, - "input":{"shape":"ListMeetingTagsRequest"}, - "output":{"shape":"ListMeetingTagsResponse"}, + "input":{"shape":"GetPhoneNumberOrderRequest"}, + "output":{"shape":"GetPhoneNumberOrderResponse"}, "errors":[ - {"shape":"BadRequestException"}, - {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, - {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the tags applied to an Amazon Chime SDK meeting resource.

" + "documentation":"

Retrieves details for the specified phone number order, such as order creation timestamp, phone numbers in E.164 format, product type, and order status.

" }, - "ListMeetings":{ - "name":"ListMeetings", + "GetPhoneNumberSettings":{ + "name":"GetPhoneNumberSettings", "http":{ "method":"GET", - "requestUri":"/meetings", + "requestUri":"/settings/phone-number", "responseCode":200 }, - "input":{"shape":"ListMeetingsRequest"}, - "output":{"shape":"ListMeetingsResponse"}, + "output":{"shape":"GetPhoneNumberSettingsResponse"}, "errors":[ - {"shape":"BadRequestException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, - {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists up to 100 active Amazon Chime SDK meetings. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Retrieves the phone number settings for the administrator's AWS account, such as the default outbound calling name.

" }, - "ListPhoneNumberOrders":{ - "name":"ListPhoneNumberOrders", + "GetProxySession":{ + "name":"GetProxySession", "http":{ "method":"GET", - "requestUri":"/phone-number-orders", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", "responseCode":200 }, - "input":{"shape":"ListPhoneNumberOrdersRequest"}, - "output":{"shape":"ListPhoneNumberOrdersResponse"}, + "input":{"shape":"GetProxySessionRequest"}, + "output":{"shape":"GetProxySessionResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the phone number orders for the administrator's Amazon Chime account.

" + "documentation":"

Gets the specified proxy session details for the specified Amazon Chime Voice Connector.

" }, - "ListPhoneNumbers":{ - "name":"ListPhoneNumbers", + "GetRetentionSettings":{ + "name":"GetRetentionSettings", "http":{ "method":"GET", - "requestUri":"/phone-numbers" + "requestUri":"/accounts/{accountId}/retention-settings" }, - "input":{"shape":"ListPhoneNumbersRequest"}, - "output":{"shape":"ListPhoneNumbersResponse"}, + "input":{"shape":"GetRetentionSettingsRequest"}, + "output":{"shape":"GetRetentionSettingsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Gets the retention settings for the specified Amazon Chime Enterprise account. For more information about retention settings, see Managing Chat Retention Policies in the Amazon Chime Administration Guide.

" + }, + "GetRoom":{ + "name":"GetRoom", + "http":{ + "method":"GET", + "requestUri":"/accounts/{accountId}/rooms/{roomId}", + "responseCode":200 + }, + "input":{"shape":"GetRoomRequest"}, + "output":{"shape":"GetRoomResponse"}, + "errors":[ {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the phone numbers for the specified Amazon Chime account, Amazon Chime user, Amazon Chime Voice Connector, or Amazon Chime Voice Connector group.

" + "documentation":"

Retrieves room details, such as the room name, for a room in an Amazon Chime Enterprise account.

" }, - "ListProxySessions":{ - "name":"ListProxySessions", + "GetSipMediaApplication":{ + "name":"GetSipMediaApplication", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions", + "requestUri":"/sip-media-applications/{sipMediaApplicationId}", "responseCode":200 }, - "input":{"shape":"ListProxySessionsRequest"}, - "output":{"shape":"ListProxySessionsResponse"}, + "input":{"shape":"GetSipMediaApplicationRequest"}, + "output":{"shape":"GetSipMediaApplicationResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1535,75 +1855,77 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the proxy sessions for the specified Amazon Chime Voice Connector.

" + "documentation":"

Retrieves the information for a SIP media application, including name, AWS Region, and endpoints.

" }, - "ListRoomMemberships":{ - "name":"ListRoomMemberships", + "GetSipMediaApplicationLoggingConfiguration":{ + "name":"GetSipMediaApplicationLoggingConfiguration", "http":{ "method":"GET", - "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships", + "requestUri":"/sip-media-applications/{sipMediaApplicationId}/logging-configuration", "responseCode":200 }, - "input":{"shape":"ListRoomMembershipsRequest"}, - "output":{"shape":"ListRoomMembershipsResponse"}, + "input":{"shape":"GetSipMediaApplicationLoggingConfigurationRequest"}, + "output":{"shape":"GetSipMediaApplicationLoggingConfigurationResponse"}, "errors":[ + {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"UnauthorizedClientException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the membership details for the specified room in an Amazon Chime Enterprise account, such as the members' IDs, email addresses, and names.

" + "documentation":"

Returns the logging configuration for the specified SIP media application.

" }, - "ListRooms":{ - "name":"ListRooms", + "GetSipRule":{ + "name":"GetSipRule", "http":{ "method":"GET", - "requestUri":"/accounts/{accountId}/rooms", + "requestUri":"/sip-rules/{sipRuleId}", "responseCode":200 }, - "input":{"shape":"ListRoomsRequest"}, - "output":{"shape":"ListRoomsResponse"}, + "input":{"shape":"GetSipRuleRequest"}, + "output":{"shape":"GetSipRuleResponse"}, "errors":[ + {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"UnauthorizedClientException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the room details for the specified Amazon Chime Enterprise account. Optionally, filter the results by a member ID (user ID or bot ID) to see a list of rooms that the member belongs to.

" + "documentation":"

Retrieves the details of a SIP rule, such as the rule ID, name, triggers, and target endpoints.

" }, - "ListTagsForResource":{ - "name":"ListTagsForResource", + "GetUser":{ + "name":"GetUser", "http":{ "method":"GET", - "requestUri":"/tags" + "requestUri":"/accounts/{accountId}/users/{userId}", + "responseCode":200 }, - "input":{"shape":"ListTagsForResourceRequest"}, - "output":{"shape":"ListTagsForResourceResponse"}, + "input":{"shape":"GetUserRequest"}, + "output":{"shape":"GetUserResponse"}, "errors":[ - {"shape":"BadRequestException"}, - {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the tags applied to an Amazon Chime SDK meeting resource.

" + "documentation":"

Retrieves details for the specified user ID, such as primary email address, license type, and personal meeting PIN.

To retrieve user details with an email address instead of a user ID, use the ListUsers action, and then filter by email address.

" }, - "ListUsers":{ - "name":"ListUsers", + "GetUserSettings":{ + "name":"GetUserSettings", "http":{ "method":"GET", - "requestUri":"/accounts/{accountId}/users", + "requestUri":"/accounts/{accountId}/users/{userId}/settings", "responseCode":200 }, - "input":{"shape":"ListUsersRequest"}, - "output":{"shape":"ListUsersResponse"}, + "input":{"shape":"GetUserSettingsRequest"}, + "output":{"shape":"GetUserSettingsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1613,36 +1935,37 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the users that belong to the specified Amazon Chime account. You can specify an email address to list only the user that the email address belongs to.

" + "documentation":"

Retrieves settings for the specified user ID, such as any associated phone number settings.

" }, - "ListVoiceConnectorGroups":{ - "name":"ListVoiceConnectorGroups", + "GetVoiceConnector":{ + "name":"GetVoiceConnector", "http":{ "method":"GET", - "requestUri":"/voice-connector-groups", + "requestUri":"/voice-connectors/{voiceConnectorId}", "responseCode":200 }, - "input":{"shape":"ListVoiceConnectorGroupsRequest"}, - "output":{"shape":"ListVoiceConnectorGroupsResponse"}, + "input":{"shape":"GetVoiceConnectorRequest"}, + "output":{"shape":"GetVoiceConnectorResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the Amazon Chime Voice Connector groups for the administrator's AWS account.

" + "documentation":"

Retrieves details for the specified Amazon Chime Voice Connector, such as timestamps, name, outbound host, and encryption requirements.

" }, - "ListVoiceConnectorTerminationCredentials":{ - "name":"ListVoiceConnectorTerminationCredentials", + "GetVoiceConnectorEmergencyCallingConfiguration":{ + "name":"GetVoiceConnectorEmergencyCallingConfiguration", "http":{ "method":"GET", - "requestUri":"/voice-connectors/{voiceConnectorId}/termination/credentials", + "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", "responseCode":200 }, - "input":{"shape":"ListVoiceConnectorTerminationCredentialsRequest"}, - "output":{"shape":"ListVoiceConnectorTerminationCredentialsResponse"}, + "input":{"shape":"GetVoiceConnectorEmergencyCallingConfigurationRequest"}, + "output":{"shape":"GetVoiceConnectorEmergencyCallingConfigurationResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1652,36 +1975,37 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the SIP credentials for the specified Amazon Chime Voice Connector.

" + "documentation":"

Gets the emergency calling configuration details for the specified Amazon Chime Voice Connector.

" }, - "ListVoiceConnectors":{ - "name":"ListVoiceConnectors", + "GetVoiceConnectorGroup":{ + "name":"GetVoiceConnectorGroup", "http":{ "method":"GET", - "requestUri":"/voice-connectors", + "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}", "responseCode":200 }, - "input":{"shape":"ListVoiceConnectorsRequest"}, - "output":{"shape":"ListVoiceConnectorsResponse"}, + "input":{"shape":"GetVoiceConnectorGroupRequest"}, + "output":{"shape":"GetVoiceConnectorGroupResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Lists the Amazon Chime Voice Connectors for the administrator's AWS account.

" + "documentation":"

Retrieves details for the specified Amazon Chime Voice Connector group, such as timestamps, name, and associated VoiceConnectorItems.

" }, - "LogoutUser":{ - "name":"LogoutUser", + "GetVoiceConnectorLoggingConfiguration":{ + "name":"GetVoiceConnectorLoggingConfiguration", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/users/{userId}?operation=logout", - "responseCode":204 + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/logging-configuration", + "responseCode":200 }, - "input":{"shape":"LogoutUserRequest"}, - "output":{"shape":"LogoutUserResponse"}, + "input":{"shape":"GetVoiceConnectorLoggingConfigurationRequest"}, + "output":{"shape":"GetVoiceConnectorLoggingConfigurationResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1691,58 +2015,57 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Logs out the specified user from all of the devices they are currently logged into.

" + "documentation":"

Retrieves the logging configuration details for the specified Amazon Chime Voice Connector. Shows whether SIP message logs are enabled for sending to Amazon CloudWatch.

" }, - "PutEventsConfiguration":{ - "name":"PutEventsConfiguration", + "GetVoiceConnectorOrigination":{ + "name":"GetVoiceConnectorOrigination", "http":{ - "method":"PUT", - "requestUri":"/accounts/{accountId}/bots/{botId}/events-configuration", - "responseCode":201 + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/origination", + "responseCode":200 }, - "input":{"shape":"PutEventsConfigurationRequest"}, - "output":{"shape":"PutEventsConfigurationResponse"}, + "input":{"shape":"GetVoiceConnectorOriginationRequest"}, + "output":{"shape":"GetVoiceConnectorOriginationResponse"}, "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, - {"shape":"UnauthorizedClientException"}, - {"shape":"ResourceLimitExceededException"}, - {"shape":"NotFoundException"} + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} ], - "documentation":"

Creates an events configuration that allows a bot to receive outgoing events sent by Amazon Chime. Choose either an HTTPS endpoint or a Lambda function ARN. For more information, see Bot.

" + "documentation":"

Retrieves origination setting details for the specified Amazon Chime Voice Connector.

" }, - "PutRetentionSettings":{ - "name":"PutRetentionSettings", + "GetVoiceConnectorProxy":{ + "name":"GetVoiceConnectorProxy", "http":{ - "method":"PUT", - "requestUri":"/accounts/{accountId}/retention-settings", - "responseCode":204 + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy", + "responseCode":200 }, - "input":{"shape":"PutRetentionSettingsRequest"}, - "output":{"shape":"PutRetentionSettingsResponse"}, + "input":{"shape":"GetVoiceConnectorProxyRequest"}, + "output":{"shape":"GetVoiceConnectorProxyResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Puts retention settings for the specified Amazon Chime Enterprise account. We recommend using AWS CloudTrail to monitor usage of this API for your account. For more information, see Logging Amazon Chime API Calls with AWS CloudTrail in the Amazon Chime Administration Guide.

To turn off existing retention settings, remove the number of days from the corresponding RetentionDays field in the RetentionSettings object. For more information about retention settings, see Managing Chat Retention Policies in the Amazon Chime Administration Guide.

" + "documentation":"

Gets the proxy configuration details for the specified Amazon Chime Voice Connector.

" }, - "PutVoiceConnectorEmergencyCallingConfiguration":{ - "name":"PutVoiceConnectorEmergencyCallingConfiguration", + "GetVoiceConnectorStreamingConfiguration":{ + "name":"GetVoiceConnectorStreamingConfiguration", "http":{ - "method":"PUT", - "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/streaming-configuration", "responseCode":200 }, - "input":{"shape":"PutVoiceConnectorEmergencyCallingConfigurationRequest"}, - "output":{"shape":"PutVoiceConnectorEmergencyCallingConfigurationResponse"}, + "input":{"shape":"GetVoiceConnectorStreamingConfigurationRequest"}, + "output":{"shape":"GetVoiceConnectorStreamingConfigurationResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1752,17 +2075,17 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Puts emergency calling configuration details to the specified Amazon Chime Voice Connector, such as emergency phone numbers and calling countries. Origination and termination settings must be enabled for the Amazon Chime Voice Connector before emergency calling can be configured.

" + "documentation":"

Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis. It also shows the retention period, in hours, for the Amazon Kinesis data.

" }, - "PutVoiceConnectorLoggingConfiguration":{ - "name":"PutVoiceConnectorLoggingConfiguration", + "GetVoiceConnectorTermination":{ + "name":"GetVoiceConnectorTermination", "http":{ - "method":"PUT", - "requestUri":"/voice-connectors/{voiceConnectorId}/logging-configuration", + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/termination", "responseCode":200 }, - "input":{"shape":"PutVoiceConnectorLoggingConfigurationRequest"}, - "output":{"shape":"PutVoiceConnectorLoggingConfigurationResponse"}, + "input":{"shape":"GetVoiceConnectorTerminationRequest"}, + "output":{"shape":"GetVoiceConnectorTerminationResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1772,17 +2095,17 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds a logging configuration for the specified Amazon Chime Voice Connector. The logging configuration specifies whether SIP message logs are enabled for sending to Amazon CloudWatch Logs.

" + "documentation":"

Retrieves termination setting details for the specified Amazon Chime Voice Connector.

" }, - "PutVoiceConnectorOrigination":{ - "name":"PutVoiceConnectorOrigination", + "GetVoiceConnectorTerminationHealth":{ + "name":"GetVoiceConnectorTerminationHealth", "http":{ - "method":"PUT", - "requestUri":"/voice-connectors/{voiceConnectorId}/origination", + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/termination/health", "responseCode":200 }, - "input":{"shape":"PutVoiceConnectorOriginationRequest"}, - "output":{"shape":"PutVoiceConnectorOriginationResponse"}, + "input":{"shape":"GetVoiceConnectorTerminationHealthRequest"}, + "output":{"shape":"GetVoiceConnectorTerminationHealthResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1792,19 +2115,19 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds origination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to turning off origination settings.

" + "documentation":"

Retrieves information about the last time a SIP OPTIONS ping was received from your SIP infrastructure for the specified Amazon Chime Voice Connector.

" }, - "PutVoiceConnectorProxy":{ - "name":"PutVoiceConnectorProxy", + "InviteUsers":{ + "name":"InviteUsers", "http":{ - "method":"PUT", - "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy" + "method":"POST", + "requestUri":"/accounts/{accountId}/users?operation=add", + "responseCode":201 }, - "input":{"shape":"PutVoiceConnectorProxyRequest"}, - "output":{"shape":"PutVoiceConnectorProxyResponse"}, + "input":{"shape":"InviteUsersRequest"}, + "output":{"shape":"InviteUsersResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, - {"shape":"AccessDeniedException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, @@ -1812,17 +2135,16 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Puts the specified proxy configuration to the specified Amazon Chime Voice Connector.

" + "documentation":"

Sends email to a maximum of 50 users, inviting them to the specified Amazon Chime Team account. Only Team account types are currently supported for this action.

" }, - "PutVoiceConnectorStreamingConfiguration":{ - "name":"PutVoiceConnectorStreamingConfiguration", + "ListAccounts":{ + "name":"ListAccounts", "http":{ - "method":"PUT", - "requestUri":"/voice-connectors/{voiceConnectorId}/streaming-configuration", - "responseCode":200 + "method":"GET", + "requestUri":"/accounts" }, - "input":{"shape":"PutVoiceConnectorStreamingConfigurationRequest"}, - "output":{"shape":"PutVoiceConnectorStreamingConfigurationResponse"}, + "input":{"shape":"ListAccountsRequest"}, + "output":{"shape":"ListAccountsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -1832,389 +2154,414 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to Amazon Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data.

" + "documentation":"

Lists the Amazon Chime accounts under the administrator's AWS account. You can filter accounts by account name prefix. To find out which Amazon Chime account a user belongs to, you can filter by the user's email address, which returns one account result.

" }, - "PutVoiceConnectorTermination":{ - "name":"PutVoiceConnectorTermination", + "ListAppInstanceAdmins":{ + "name":"ListAppInstanceAdmins", "http":{ - "method":"PUT", - "requestUri":"/voice-connectors/{voiceConnectorId}/termination", + "method":"GET", + "requestUri":"/app-instances/{appInstanceArn}/admins", "responseCode":200 }, - "input":{"shape":"PutVoiceConnectorTerminationRequest"}, - "output":{"shape":"PutVoiceConnectorTerminationResponse"}, + "input":{"shape":"ListAppInstanceAdminsRequest"}, + "output":{"shape":"ListAppInstanceAdminsResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds termination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to turning off termination settings.

" + "documentation":"

Returns a list of the administrators in the app instance.

", + "endpoint":{"hostPrefix":"identity-"} }, - "PutVoiceConnectorTerminationCredentials":{ - "name":"PutVoiceConnectorTerminationCredentials", + "ListAppInstanceUsers":{ + "name":"ListAppInstanceUsers", "http":{ - "method":"POST", - "requestUri":"/voice-connectors/{voiceConnectorId}/termination/credentials?operation=put", - "responseCode":204 + "method":"GET", + "requestUri":"/app-instance-users", + "responseCode":200 }, - "input":{"shape":"PutVoiceConnectorTerminationCredentialsRequest"}, + "input":{"shape":"ListAppInstanceUsersRequest"}, + "output":{"shape":"ListAppInstanceUsersResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds termination SIP credentials for the specified Amazon Chime Voice Connector.

" + "documentation":"

Lists all AppInstanceUsers created under a single app instance.

", + "endpoint":{"hostPrefix":"identity-"} }, - "RedactConversationMessage":{ - "name":"RedactConversationMessage", + "ListAppInstances":{ + "name":"ListAppInstances", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/conversations/{conversationId}/messages/{messageId}?operation=redact", + "method":"GET", + "requestUri":"/app-instances", "responseCode":200 }, - "input":{"shape":"RedactConversationMessageRequest"}, - "output":{"shape":"RedactConversationMessageResponse"}, + "input":{"shape":"ListAppInstancesRequest"}, + "output":{"shape":"ListAppInstancesResponse"}, "errors":[ - {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, - {"shape":"BadRequestException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Redacts the specified message from the specified Amazon Chime conversation.

" + "documentation":"

Lists all Amazon Chime app instances created under a single AWS account.

", + "endpoint":{"hostPrefix":"identity-"} }, - "RedactRoomMessage":{ - "name":"RedactRoomMessage", + "ListAttendeeTags":{ + "name":"ListAttendeeTags", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/rooms/{roomId}/messages/{messageId}?operation=redact", + "method":"GET", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags", "responseCode":200 }, - "input":{"shape":"RedactRoomMessageRequest"}, - "output":{"shape":"RedactRoomMessageResponse"}, + "input":{"shape":"ListAttendeeTagsRequest"}, + "output":{"shape":"ListAttendeeTagsResponse"}, "errors":[ - {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the tags applied to an Amazon Chime SDK attendee resource.

" + }, + "ListAttendees":{ + "name":"ListAttendees", + "http":{ + "method":"GET", + "requestUri":"/meetings/{meetingId}/attendees", + "responseCode":200 + }, + "input":{"shape":"ListAttendeesRequest"}, + "output":{"shape":"ListAttendeesResponse"}, + "errors":[ {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Redacts the specified message from the specified Amazon Chime chat room.

" + "documentation":"

Lists the attendees for the specified Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, - "RegenerateSecurityToken":{ - "name":"RegenerateSecurityToken", + "ListBots":{ + "name":"ListBots", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/bots/{botId}?operation=regenerate-security-token", + "method":"GET", + "requestUri":"/accounts/{accountId}/bots", "responseCode":200 }, - "input":{"shape":"RegenerateSecurityTokenRequest"}, - "output":{"shape":"RegenerateSecurityTokenResponse"}, + "input":{"shape":"ListBotsRequest"}, + "output":{"shape":"ListBotsResponse"}, "errors":[ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"}, {"shape":"ForbiddenException"}, - {"shape":"BadRequestException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"BadRequestException"}, {"shape":"NotFoundException"}, {"shape":"ThrottledClientException"} ], - "documentation":"

Regenerates the security token for a bot.

" + "documentation":"

Lists the bots associated with the administrator's Amazon Chime Enterprise account ID.

" }, - "ResetPersonalPIN":{ - "name":"ResetPersonalPIN", + "ListChannelBans":{ + "name":"ListChannelBans", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/users/{userId}?operation=reset-personal-pin", + "method":"GET", + "requestUri":"/channels/{channelArn}/bans", "responseCode":200 }, - "input":{"shape":"ResetPersonalPINRequest"}, - "output":{"shape":"ResetPersonalPINResponse"}, + "input":{"shape":"ListChannelBansRequest"}, + "output":{"shape":"ListChannelBansResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Resets the personal meeting PIN for the specified user on an Amazon Chime account. Returns the User object with the updated personal meeting PIN.

" + "documentation":"

Lists all the users banned from a particular channel.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "RestorePhoneNumber":{ - "name":"RestorePhoneNumber", + "ListChannelMemberships":{ + "name":"ListChannelMemberships", "http":{ - "method":"POST", - "requestUri":"/phone-numbers/{phoneNumberId}?operation=restore", + "method":"GET", + "requestUri":"/channels/{channelArn}/memberships", "responseCode":200 }, - "input":{"shape":"RestorePhoneNumberRequest"}, - "output":{"shape":"RestorePhoneNumberResponse"}, + "input":{"shape":"ListChannelMembershipsRequest"}, + "output":{"shape":"ListChannelMembershipsResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, - {"shape":"ResourceLimitExceededException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Moves a phone number from the Deletion queue back into the phone number Inventory.

" + "documentation":"

Lists all channel memberships in a channel.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "SearchAvailablePhoneNumbers":{ - "name":"SearchAvailablePhoneNumbers", + "ListChannelMembershipsForAppInstanceUser":{ + "name":"ListChannelMembershipsForAppInstanceUser", "http":{ "method":"GET", - "requestUri":"/search?type=phone-numbers" + "requestUri":"/channels?scope=app-instance-user-memberships", + "responseCode":200 }, - "input":{"shape":"SearchAvailablePhoneNumbersRequest"}, - "output":{"shape":"SearchAvailablePhoneNumbersResponse"}, + "input":{"shape":"ListChannelMembershipsForAppInstanceUserRequest"}, + "output":{"shape":"ListChannelMembershipsForAppInstanceUserResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"AccessDeniedException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Searches for phone numbers that can be ordered.

" + "documentation":"

Lists all channels that a particular AppInstanceUser is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "TagAttendee":{ - "name":"TagAttendee", + "ListChannelMessages":{ + "name":"ListChannelMessages", "http":{ - "method":"POST", - "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags?operation=add", - "responseCode":204 + "method":"GET", + "requestUri":"/channels/{channelArn}/messages", + "responseCode":200 }, - "input":{"shape":"TagAttendeeRequest"}, + "input":{"shape":"ListChannelMessagesRequest"}, + "output":{"shape":"ListChannelMessagesResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, - {"shape":"ResourceLimitExceededException"}, - {"shape":"ThrottledClientException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Applies the specified tags to the specified Amazon Chime SDK attendee.

" + "documentation":"

Lists all the messages in a channel. Returns a paginated list of ChannelMessages, sorted in descending order by creation timestamp by default.

Redacted messages appear in the results as empty, since they are only redacted, not deleted. Deleted messages do not appear in the results. This action always returns the latest version of an edited message.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "TagMeeting":{ - "name":"TagMeeting", + "ListChannelModerators":{ + "name":"ListChannelModerators", "http":{ - "method":"POST", - "requestUri":"/meetings/{meetingId}/tags?operation=add", - "responseCode":204 + "method":"GET", + "requestUri":"/channels/{channelArn}/moderators", + "responseCode":200 }, - "input":{"shape":"TagMeetingRequest"}, + "input":{"shape":"ListChannelModeratorsRequest"}, + "output":{"shape":"ListChannelModeratorsResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, - {"shape":"ResourceLimitExceededException"}, - {"shape":"ThrottledClientException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Applies the specified tags to the specified Amazon Chime SDK meeting.

" + "documentation":"

Lists all the moderators for a channel.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "TagResource":{ - "name":"TagResource", + "ListChannels":{ + "name":"ListChannels", "http":{ - "method":"POST", - "requestUri":"/tags?operation=tag-resource", - "responseCode":204 + "method":"GET", + "requestUri":"/channels", + "responseCode":200 }, - "input":{"shape":"TagResourceRequest"}, + "input":{"shape":"ListChannelsRequest"}, + "output":{"shape":"ListChannelsResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Applies the specified tags to the specified Amazon Chime SDK meeting resource.

" + "documentation":"

Lists all Channels created under a single Chime App as a paginated list. You can specify filters to narrow results.

Functionality & restrictions

  • Use privacy = PUBLIC to retrieve all public channels in the account.

  • Only an AppInstanceAdmin can set privacy = PRIVATE to list the private channels in an account.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "UntagAttendee":{ - "name":"UntagAttendee", + "ListChannelsModeratedByAppInstanceUser":{ + "name":"ListChannelsModeratedByAppInstanceUser", "http":{ - "method":"POST", - "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags?operation=delete", - "responseCode":204 + "method":"GET", + "requestUri":"/channels?scope=app-instance-user-moderated-channels", + "responseCode":200 }, - "input":{"shape":"UntagAttendeeRequest"}, + "input":{"shape":"ListChannelsModeratedByAppInstanceUserRequest"}, + "output":{"shape":"ListChannelsModeratedByAppInstanceUserResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"ThrottledClientException"}, - {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified tags from the specified Amazon Chime SDK attendee.

" + "documentation":"

Lists the channels moderated by an app instance user.

", + "endpoint":{"hostPrefix":"messaging-"} }, - "UntagMeeting":{ - "name":"UntagMeeting", + "ListMeetingTags":{ + "name":"ListMeetingTags", "http":{ - "method":"POST", - "requestUri":"/meetings/{meetingId}/tags?operation=delete", - "responseCode":204 + "method":"GET", + "requestUri":"/meetings/{meetingId}/tags", + "responseCode":200 }, - "input":{"shape":"UntagMeetingRequest"}, + "input":{"shape":"ListMeetingTagsRequest"}, + "output":{"shape":"ListMeetingTagsResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"ThrottledClientException"}, {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified tags from the specified Amazon Chime SDK meeting.

" + "documentation":"

Lists the tags applied to an Amazon Chime SDK meeting resource.

" }, - "UntagResource":{ - "name":"UntagResource", + "ListMeetings":{ + "name":"ListMeetings", "http":{ - "method":"POST", - "requestUri":"/tags?operation=untag-resource", - "responseCode":204 + "method":"GET", + "requestUri":"/meetings", + "responseCode":200 }, - "input":{"shape":"UntagResourceRequest"}, + "input":{"shape":"ListMeetingsRequest"}, + "output":{"shape":"ListMeetingsResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, {"shape":"UnauthorizedClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Removes the specified tags from the specified Amazon Chime SDK meeting resource.

" + "documentation":"

Lists up to 100 active Amazon Chime SDK meetings. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, - "UpdateAccount":{ - "name":"UpdateAccount", + "ListPhoneNumberOrders":{ + "name":"ListPhoneNumberOrders", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}", + "method":"GET", + "requestUri":"/phone-number-orders", "responseCode":200 }, - "input":{"shape":"UpdateAccountRequest"}, - "output":{"shape":"UpdateAccountResponse"}, + "input":{"shape":"ListPhoneNumberOrdersRequest"}, + "output":{"shape":"ListPhoneNumberOrdersResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates account details for the specified Amazon Chime account. Currently, only account name updates are supported for this action.

" + "documentation":"

Lists the phone number orders for the administrator's Amazon Chime account.

" }, - "UpdateAccountSettings":{ - "name":"UpdateAccountSettings", + "ListPhoneNumbers":{ + "name":"ListPhoneNumbers", "http":{ - "method":"PUT", - "requestUri":"/accounts/{accountId}/settings", - "responseCode":204 + "method":"GET", + "requestUri":"/phone-numbers" }, - "input":{"shape":"UpdateAccountSettingsRequest"}, - "output":{"shape":"UpdateAccountSettingsResponse"}, + "input":{"shape":"ListPhoneNumbersRequest"}, + "output":{"shape":"ListPhoneNumbersResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, - {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, - {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the settings for the specified Amazon Chime account. You can update settings for remote control of shared screens, or for the dial-out option. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + "documentation":"

Lists the phone numbers for the specified Amazon Chime account, Amazon Chime user, Amazon Chime Voice Connector, or Amazon Chime Voice Connector group.

" }, - "UpdateBot":{ - "name":"UpdateBot", + "ListProxySessions":{ + "name":"ListProxySessions", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/bots/{botId}", + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions", "responseCode":200 }, - "input":{"shape":"UpdateBotRequest"}, - "output":{"shape":"UpdateBotResponse"}, + "input":{"shape":"ListProxySessionsRequest"}, + "output":{"shape":"ListProxySessionsResponse"}, "errors":[ - {"shape":"ServiceUnavailableException"}, - {"shape":"ServiceFailureException"}, - {"shape":"ForbiddenException"}, - {"shape":"BadRequestException"}, {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"ThrottledClientException"} + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the status of the specified bot, such as starting or stopping the bot from running in your Amazon Chime Enterprise account.

" + "documentation":"

Lists the proxy sessions for the specified Amazon Chime Voice Connector.

" }, - "UpdateGlobalSettings":{ - "name":"UpdateGlobalSettings", + "ListRoomMemberships":{ + "name":"ListRoomMemberships", "http":{ - "method":"PUT", - "requestUri":"/settings", - "responseCode":204 + "method":"GET", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships", + "responseCode":200 }, - "input":{"shape":"UpdateGlobalSettingsRequest"}, + "input":{"shape":"ListRoomMembershipsRequest"}, + "output":{"shape":"ListRoomMembershipsResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, - {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.

" + "documentation":"

Lists the membership details for the specified room in an Amazon Chime Enterprise account, such as the members' IDs, email addresses, and names.

" }, - "UpdatePhoneNumber":{ - "name":"UpdatePhoneNumber", + "ListRooms":{ + "name":"ListRooms", "http":{ - "method":"POST", - "requestUri":"/phone-numbers/{phoneNumberId}", + "method":"GET", + "requestUri":"/accounts/{accountId}/rooms", "responseCode":200 }, - "input":{"shape":"UpdatePhoneNumberRequest"}, - "output":{"shape":"UpdatePhoneNumberResponse"}, + "input":{"shape":"ListRoomsRequest"}, + "output":{"shape":"ListRoomsResponse"}, "errors":[ - {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates phone number details, such as product type or calling name, for the specified phone number ID. You can update one phone number detail at a time. For example, you can update either the product type or the calling name in one action.

For toll-free numbers, you must use the Amazon Chime Voice Connector product type.

Updates to outbound calling names can take up to 72 hours to complete. Pending updates to outbound calling names must be complete before you can request another update.

" + "documentation":"

Lists the room details for the specified Amazon Chime Enterprise account. Optionally, filter the results by a member ID (user ID or bot ID) to see a list of rooms that the member belongs to.

" }, - "UpdatePhoneNumberSettings":{ - "name":"UpdatePhoneNumberSettings", + "ListSipMediaApplications":{ + "name":"ListSipMediaApplications", "http":{ - "method":"PUT", - "requestUri":"/settings/phone-number", - "responseCode":204 + "method":"GET", + "requestUri":"/sip-media-applications", + "responseCode":200 }, - "input":{"shape":"UpdatePhoneNumberSettingsRequest"}, + "input":{"shape":"ListSipMediaApplicationsRequest"}, + "output":{"shape":"ListSipMediaApplicationsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"ForbiddenException"}, @@ -2223,96 +2570,93 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the phone number settings for the administrator's AWS account, such as the default outbound calling name. You can update the default outbound calling name once every seven days. Outbound calling names can take up to 72 hours to update.

" + "documentation":"

Lists the SIP media applications under the administrator's AWS account.

" }, - "UpdateProxySession":{ - "name":"UpdateProxySession", + "ListSipRules":{ + "name":"ListSipRules", "http":{ - "method":"POST", - "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", - "responseCode":201 + "method":"GET", + "requestUri":"/sip-rules", + "responseCode":200 }, - "input":{"shape":"UpdateProxySessionRequest"}, - "output":{"shape":"UpdateProxySessionResponse"}, + "input":{"shape":"ListSipRulesRequest"}, + "output":{"shape":"ListSipRulesResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the specified proxy session details, such as voice or SMS capabilities.

" + "documentation":"

Lists the SIP rules under the administrator's AWS account.

" }, - "UpdateRoom":{ - "name":"UpdateRoom", + "ListTagsForResource":{ + "name":"ListTagsForResource", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/rooms/{roomId}", - "responseCode":200 + "method":"GET", + "requestUri":"/tags" }, - "input":{"shape":"UpdateRoomRequest"}, - "output":{"shape":"UpdateRoomResponse"}, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedClientException"}, - {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates room details, such as the room name, for a room in an Amazon Chime Enterprise account.

" + "documentation":"

Lists the tags applied to an Amazon Chime SDK meeting resource.

" }, - "UpdateRoomMembership":{ - "name":"UpdateRoomMembership", + "ListUsers":{ + "name":"ListUsers", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + "method":"GET", + "requestUri":"/accounts/{accountId}/users", "responseCode":200 }, - "input":{"shape":"UpdateRoomMembershipRequest"}, - "output":{"shape":"UpdateRoomMembershipResponse"}, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, - {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates room membership details, such as the member role, for a room in an Amazon Chime Enterprise account. The member role designates whether the member is a chat room administrator or a general chat room member. The member role can be updated only for user IDs.

" + "documentation":"

Lists the users that belong to the specified Amazon Chime account. You can specify an email address to list only the user that the email address belongs to.

" }, - "UpdateUser":{ - "name":"UpdateUser", + "ListVoiceConnectorGroups":{ + "name":"ListVoiceConnectorGroups", "http":{ - "method":"POST", - "requestUri":"/accounts/{accountId}/users/{userId}", + "method":"GET", + "requestUri":"/voice-connector-groups", "responseCode":200 }, - "input":{"shape":"UpdateUserRequest"}, - "output":{"shape":"UpdateUserResponse"}, + "input":{"shape":"ListVoiceConnectorGroupsRequest"}, + "output":{"shape":"ListVoiceConnectorGroupsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, - {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates user details for a specified user ID. Currently, only LicenseType updates are supported for this action.

" + "documentation":"

Lists the Amazon Chime Voice Connector groups for the administrator's AWS account.

" }, - "UpdateUserSettings":{ - "name":"UpdateUserSettings", + "ListVoiceConnectorTerminationCredentials":{ + "name":"ListVoiceConnectorTerminationCredentials", "http":{ - "method":"PUT", - "requestUri":"/accounts/{accountId}/users/{userId}/settings", - "responseCode":204 + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/termination/credentials", + "responseCode":200 }, - "input":{"shape":"UpdateUserSettingsRequest"}, + "input":{"shape":"ListVoiceConnectorTerminationCredentialsRequest"}, + "output":{"shape":"ListVoiceConnectorTerminationCredentialsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -2322,17 +2666,36 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates the settings for the specified user, such as phone number settings.

" + "documentation":"

Lists the SIP credentials for the specified Amazon Chime Voice Connector.

" }, - "UpdateVoiceConnector":{ - "name":"UpdateVoiceConnector", + "ListVoiceConnectors":{ + "name":"ListVoiceConnectors", "http":{ - "method":"PUT", - "requestUri":"/voice-connectors/{voiceConnectorId}", + "method":"GET", + "requestUri":"/voice-connectors", "responseCode":200 }, - "input":{"shape":"UpdateVoiceConnectorRequest"}, - "output":{"shape":"UpdateVoiceConnectorResponse"}, + "input":{"shape":"ListVoiceConnectorsRequest"}, + "output":{"shape":"ListVoiceConnectorsResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the Amazon Chime Voice Connectors for the administrator's AWS account.

" + }, + "LogoutUser":{ + "name":"LogoutUser", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/users/{userId}?operation=logout", + "responseCode":204 + }, + "input":{"shape":"LogoutUserRequest"}, + "output":{"shape":"LogoutUserResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, @@ -2342,318 +2705,3040 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates details for the specified Amazon Chime Voice Connector.

" + "documentation":"

Logs out the specified user from all of the devices they are currently logged into.

" }, - "UpdateVoiceConnectorGroup":{ - "name":"UpdateVoiceConnectorGroup", + "PutAppInstanceRetentionSettings":{ + "name":"PutAppInstanceRetentionSettings", "http":{ "method":"PUT", - "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}", - "responseCode":202 + "requestUri":"/app-instances/{appInstanceArn}/retention-settings", + "responseCode":200 }, - "input":{"shape":"UpdateVoiceConnectorGroupRequest"}, - "output":{"shape":"UpdateVoiceConnectorGroupResponse"}, + "input":{"shape":"PutAppInstanceRetentionSettingsRequest"}, + "output":{"shape":"PutAppInstanceRetentionSettingsResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Sets the amount of time in days that a given app instance retains data.

", + "endpoint":{"hostPrefix":"identity-"} + }, + "PutAppInstanceStreamingConfigurations":{ + "name":"PutAppInstanceStreamingConfigurations", + "http":{ + "method":"PUT", + "requestUri":"/app-instances/{appInstanceArn}/streaming-configurations", + "responseCode":200 + }, + "input":{"shape":"PutAppInstanceStreamingConfigurationsRequest"}, + "output":{"shape":"PutAppInstanceStreamingConfigurationsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Sets the data streaming configurations of an app instance.

" + }, + "PutEventsConfiguration":{ + "name":"PutEventsConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{accountId}/bots/{botId}/events-configuration", + "responseCode":201 + }, + "input":{"shape":"PutEventsConfigurationRequest"}, + "output":{"shape":"PutEventsConfigurationResponse"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, {"shape":"ForbiddenException"}, {"shape":"BadRequestException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Creates an events configuration that allows a bot to receive outgoing events sent by Amazon Chime. Choose either an HTTPS endpoint or a Lambda function ARN. For more information, see Bot.

" + }, + "PutRetentionSettings":{ + "name":"PutRetentionSettings", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{accountId}/retention-settings", + "responseCode":204 + }, + "input":{"shape":"PutRetentionSettingsRequest"}, + "output":{"shape":"PutRetentionSettingsResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, {"shape":"ConflictException"}, {"shape":"ThrottledClientException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Updates details for the specified Amazon Chime Voice Connector group, such as the name and Amazon Chime Voice Connector priority ranking.

" - } - }, - "shapes":{ - "AccessDeniedException":{ + "documentation":"

Puts retention settings for the specified Amazon Chime Enterprise account. We recommend using AWS CloudTrail to monitor usage of this API for your account. For more information, see Logging Amazon Chime API Calls with AWS CloudTrail in the Amazon Chime Administration Guide.

To turn off existing retention settings, remove the number of days from the corresponding RetentionDays field in the RetentionSettings object. For more information about retention settings, see Managing Chat Retention Policies in the Amazon Chime Administration Guide.
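For illustration, a minimal sketch of how this operation might be called with the generated Java client. Only the RetentionSettings object and its RetentionDays fields are confirmed by the model above; the RoomRetentionSettings member name and the usual SDK v2 builder conventions are assumptions.

    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.*;

    public class PutRetentionSettingsSketch {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();
            // Retain chat-room data for 30 days; clearing RetentionDays turns retention off.
            chime.putRetentionSettings(PutRetentionSettingsRequest.builder()
                    .accountId("<chime-account-id>")
                    .retentionSettings(RetentionSettings.builder()
                            .roomRetentionSettings(RoomRetentionSettings.builder()
                                    .retentionDays(30)
                                    .build())
                            .build())
                    .build());
        }
    }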

" + }, + "PutSipMediaApplicationLoggingConfiguration":{ + "name":"PutSipMediaApplicationLoggingConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/sip-media-applications/{sipMediaApplicationId}/logging-configuration", + "responseCode":200 + }, + "input":{"shape":"PutSipMediaApplicationLoggingConfigurationRequest"}, + "output":{"shape":"PutSipMediaApplicationLoggingConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the logging configuration for the specified SIP media application.

" + }, + "PutVoiceConnectorEmergencyCallingConfiguration":{ + "name":"PutVoiceConnectorEmergencyCallingConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "responseCode":200 + }, + "input":{"shape":"PutVoiceConnectorEmergencyCallingConfigurationRequest"}, + "output":{"shape":"PutVoiceConnectorEmergencyCallingConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Puts emergency calling configuration details to the specified Amazon Chime Voice Connector, such as emergency phone numbers and calling countries. Origination and termination settings must be enabled for the Amazon Chime Voice Connector before emergency calling can be configured.

" + }, + "PutVoiceConnectorLoggingConfiguration":{ + "name":"PutVoiceConnectorLoggingConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/logging-configuration", + "responseCode":200 + }, + "input":{"shape":"PutVoiceConnectorLoggingConfigurationRequest"}, + "output":{"shape":"PutVoiceConnectorLoggingConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds a logging configuration for the specified Amazon Chime Voice Connector. The logging configuration specifies whether SIP message logs are enabled for sending to Amazon CloudWatch Logs.

" + }, + "PutVoiceConnectorOrigination":{ + "name":"PutVoiceConnectorOrigination", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/origination", + "responseCode":200 + }, + "input":{"shape":"PutVoiceConnectorOriginationRequest"}, + "output":{"shape":"PutVoiceConnectorOriginationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds origination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to turning off origination settings.

" + }, + "PutVoiceConnectorProxy":{ + "name":"PutVoiceConnectorProxy", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy" + }, + "input":{"shape":"PutVoiceConnectorProxyRequest"}, + "output":{"shape":"PutVoiceConnectorProxyResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Puts the specified proxy configuration to the specified Amazon Chime Voice Connector.

" + }, + "PutVoiceConnectorStreamingConfiguration":{ + "name":"PutVoiceConnectorStreamingConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/streaming-configuration", + "responseCode":200 + }, + "input":{"shape":"PutVoiceConnectorStreamingConfigurationRequest"}, + "output":{"shape":"PutVoiceConnectorStreamingConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to Amazon Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data.
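A minimal sketch of setting this configuration with the generated Java client; the StreamingConfiguration member names (disabled, dataRetentionInHours) are assumed from the description above and SDK v2 naming conventions.

    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.*;

    public class StreamingConfigSketch {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();
            chime.putVoiceConnectorStreamingConfiguration(
                    PutVoiceConnectorStreamingConfigurationRequest.builder()
                            .voiceConnectorId("<voice-connector-id>")
                            .streamingConfiguration(StreamingConfiguration.builder()
                                    .disabled(false)            // enable media streaming to Amazon Kinesis
                                    .dataRetentionInHours(24)   // keep streamed data for one day
                                    .build())
                            .build());
        }
    }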

" + }, + "PutVoiceConnectorTermination":{ + "name":"PutVoiceConnectorTermination", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/termination", + "responseCode":200 + }, + "input":{"shape":"PutVoiceConnectorTerminationRequest"}, + "output":{"shape":"PutVoiceConnectorTerminationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds termination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to turning off termination settings.

" + }, + "PutVoiceConnectorTerminationCredentials":{ + "name":"PutVoiceConnectorTerminationCredentials", + "http":{ + "method":"POST", + "requestUri":"/voice-connectors/{voiceConnectorId}/termination/credentials?operation=put", + "responseCode":204 + }, + "input":{"shape":"PutVoiceConnectorTerminationCredentialsRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Adds termination SIP credentials for the specified Amazon Chime Voice Connector.

" + }, + "RedactChannelMessage":{ + "name":"RedactChannelMessage", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/messages/{messageId}?operation=redact", + "responseCode":200 + }, + "input":{"shape":"RedactChannelMessageRequest"}, + "output":{"shape":"RedactChannelMessageResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Redacts message content, but not metadata. The message exists in the back end, but the action returns null content, and the state shows as redacted.

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "RedactConversationMessage":{ + "name":"RedactConversationMessage", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/conversations/{conversationId}/messages/{messageId}?operation=redact", + "responseCode":200 + }, + "input":{"shape":"RedactConversationMessageRequest"}, + "output":{"shape":"RedactConversationMessageResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"BadRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Redacts the specified message from the specified Amazon Chime conversation.

" + }, + "RedactRoomMessage":{ + "name":"RedactRoomMessage", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/messages/{messageId}?operation=redact", + "responseCode":200 + }, + "input":{"shape":"RedactRoomMessageRequest"}, + "output":{"shape":"RedactRoomMessageResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"BadRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Redacts the specified message from the specified Amazon Chime chat room.

" + }, + "RegenerateSecurityToken":{ + "name":"RegenerateSecurityToken", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/bots/{botId}?operation=regenerate-security-token", + "responseCode":200 + }, + "input":{"shape":"RegenerateSecurityTokenRequest"}, + "output":{"shape":"RegenerateSecurityTokenResponse"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"} + ], + "documentation":"

Regenerates the security token for a bot.

" + }, + "ResetPersonalPIN":{ + "name":"ResetPersonalPIN", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/users/{userId}?operation=reset-personal-pin", + "responseCode":200 + }, + "input":{"shape":"ResetPersonalPINRequest"}, + "output":{"shape":"ResetPersonalPINResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Resets the personal meeting PIN for the specified user on an Amazon Chime account. Returns the User object with the updated personal meeting PIN.

" + }, + "RestorePhoneNumber":{ + "name":"RestorePhoneNumber", + "http":{ + "method":"POST", + "requestUri":"/phone-numbers/{phoneNumberId}?operation=restore", + "responseCode":200 + }, + "input":{"shape":"RestorePhoneNumberRequest"}, + "output":{"shape":"RestorePhoneNumberResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Moves a phone number from the Deletion queue back into the phone number Inventory.

" + }, + "SearchAvailablePhoneNumbers":{ + "name":"SearchAvailablePhoneNumbers", + "http":{ + "method":"GET", + "requestUri":"/search?type=phone-numbers" + }, + "input":{"shape":"SearchAvailablePhoneNumbersRequest"}, + "output":{"shape":"SearchAvailablePhoneNumbersResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Searches for phone numbers that can be ordered.

" + }, + "SendChannelMessage":{ + "name":"SendChannelMessage", + "http":{ + "method":"POST", + "requestUri":"/channels/{channelArn}/messages", + "responseCode":201 + }, + "input":{"shape":"SendChannelMessageRequest"}, + "output":{"shape":"SendChannelMessageResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Sends a message to a particular channel that the member is a part of.

STANDARD messages can contain 4KB of data and 1KB of metadata. CONTROL messages can contain 30 bytes of data and no metadata.
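A minimal sketch of sending a STANDARD message with the generated Java client. The operation, shapes, and enums are confirmed by the model above; the exact set of required request members (for example clientRequestToken) may differ by SDK release, so treat this as an assumption to verify against SendChannelMessageRequest in your version.

    import java.util.UUID;
    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.*;

    public class SendChannelMessageSketch {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();
            SendChannelMessageResponse resp = chime.sendChannelMessage(SendChannelMessageRequest.builder()
                    .channelArn("<channel-arn>")
                    .type(ChannelMessageType.STANDARD)                     // up to 4KB of content
                    .persistence(ChannelMessagePersistenceType.PERSISTENT)
                    .content("Hello from the Chime SDK")
                    .clientRequestToken(UUID.randomUUID().toString())      // idempotency token
                    .build());
            System.out.println("Sent message " + resp.messageId());
        }
    }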

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "TagAttendee":{ + "name":"TagAttendee", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags?operation=add", + "responseCode":204 + }, + "input":{"shape":"TagAttendeeRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Applies the specified tags to the specified Amazon Chime SDK attendee.

" + }, + "TagMeeting":{ + "name":"TagMeeting", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/tags?operation=add", + "responseCode":204 + }, + "input":{"shape":"TagMeetingRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Applies the specified tags to the specified Amazon Chime SDK meeting.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags?operation=tag-resource", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Applies the specified tags to the specified Amazon Chime SDK meeting resource.

" + }, + "UntagAttendee":{ + "name":"UntagAttendee", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags?operation=delete", + "responseCode":204 + }, + "input":{"shape":"UntagAttendeeRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon Chime SDK attendee.

" + }, + "UntagMeeting":{ + "name":"UntagMeeting", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/tags?operation=delete", + "responseCode":204 + }, + "input":{"shape":"UntagMeetingRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon Chime SDK meeting.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/tags?operation=untag-resource", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon Chime SDK meeting resource.

" + }, + "UpdateAccount":{ + "name":"UpdateAccount", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}", + "responseCode":200 + }, + "input":{"shape":"UpdateAccountRequest"}, + "output":{"shape":"UpdateAccountResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates account details for the specified Amazon Chime account. Currently, only account name updates are supported for this action.

" + }, + "UpdateAccountSettings":{ + "name":"UpdateAccountSettings", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{accountId}/settings", + "responseCode":204 + }, + "input":{"shape":"UpdateAccountSettingsRequest"}, + "output":{"shape":"UpdateAccountSettingsResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the settings for the specified Amazon Chime account. You can update settings for remote control of shared screens, or for the dial-out option. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + }, + "UpdateAppInstance":{ + "name":"UpdateAppInstance", + "http":{ + "method":"PUT", + "requestUri":"/app-instances/{appInstanceArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateAppInstanceRequest"}, + "output":{"shape":"UpdateAppInstanceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates AppInstance metadata.

", + "endpoint":{"hostPrefix":"identity-"} + }, + "UpdateAppInstanceUser":{ + "name":"UpdateAppInstanceUser", + "http":{ + "method":"PUT", + "requestUri":"/app-instance-users/{appInstanceUserArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateAppInstanceUserRequest"}, + "output":{"shape":"UpdateAppInstanceUserResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the details for an AppInstanceUser. You can update names and metadata.

", + "endpoint":{"hostPrefix":"identity-"} + }, + "UpdateBot":{ + "name":"UpdateBot", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/bots/{botId}", + "responseCode":200 + }, + "input":{"shape":"UpdateBotRequest"}, + "output":{"shape":"UpdateBotResponse"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"} + ], + "documentation":"

Updates the status of the specified bot, such as starting or stopping the bot from running in your Amazon Chime Enterprise account.

" + }, + "UpdateChannel":{ + "name":"UpdateChannel", + "http":{ + "method":"PUT", + "requestUri":"/channels/{channelArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelRequest"}, + "output":{"shape":"UpdateChannelResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates a channel's attributes.

Restriction: You can't change a channel's privacy.

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "UpdateChannelMessage":{ + "name":"UpdateChannelMessage", + "http":{ + "method":"PUT", + "requestUri":"/channels/{channelArn}/messages/{messageId}", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelMessageRequest"}, + "output":{"shape":"UpdateChannelMessageResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the content of a message.

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "UpdateChannelReadMarker":{ + "name":"UpdateChannelReadMarker", + "http":{ + "method":"PUT", + "requestUri":"/channels/{channelArn}/readMarker", + "responseCode":200 + }, + "input":{"shape":"UpdateChannelReadMarkerRequest"}, + "output":{"shape":"UpdateChannelReadMarkerResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Sets the timestamp to the point when a user last read messages in a channel.

", + "endpoint":{"hostPrefix":"messaging-"} + }, + "UpdateGlobalSettings":{ + "name":"UpdateGlobalSettings", + "http":{ + "method":"PUT", + "requestUri":"/settings", + "responseCode":204 + }, + "input":{"shape":"UpdateGlobalSettingsRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.

" + }, + "UpdatePhoneNumber":{ + "name":"UpdatePhoneNumber", + "http":{ + "method":"POST", + "requestUri":"/phone-numbers/{phoneNumberId}", + "responseCode":200 + }, + "input":{"shape":"UpdatePhoneNumberRequest"}, + "output":{"shape":"UpdatePhoneNumberResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates phone number details, such as product type or calling name, for the specified phone number ID. You can update one phone number detail at a time. For example, you can update either the product type or the calling name in one action.

For toll-free numbers, you must use the Amazon Chime Voice Connector product type.

Updates to outbound calling names can take up to 72 hours to complete. Pending updates to outbound calling names must be complete before you can request another update.
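A minimal sketch of the one-detail-per-call behavior described above, using the generated Java client; the PhoneNumberProductType enum constant and builder member names are assumed from SDK v2 naming conventions.

    import software.amazon.awssdk.services.chime.ChimeClient;
    import software.amazon.awssdk.services.chime.model.*;

    public class UpdatePhoneNumberSketch {
        public static void main(String[] args) {
            ChimeClient chime = ChimeClient.create();
            String phoneNumberId = "<phone-number-id>";

            // One detail per call: first the product type (Voice Connector is required for toll-free numbers)...
            chime.updatePhoneNumber(UpdatePhoneNumberRequest.builder()
                    .phoneNumberId(phoneNumberId)
                    .productType(PhoneNumberProductType.VOICE_CONNECTOR)
                    .build());

            // ...then the outbound calling name in a separate call (may take up to 72 hours to complete).
            chime.updatePhoneNumber(UpdatePhoneNumberRequest.builder()
                    .phoneNumberId(phoneNumberId)
                    .callingName("Example Corp")
                    .build());
        }
    }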

" + }, + "UpdatePhoneNumberSettings":{ + "name":"UpdatePhoneNumberSettings", + "http":{ + "method":"PUT", + "requestUri":"/settings/phone-number", + "responseCode":204 + }, + "input":{"shape":"UpdatePhoneNumberSettingsRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the phone number settings for the administrator's AWS account, such as the default outbound calling name. You can update the default outbound calling name once every seven days. Outbound calling names can take up to 72 hours to update.

" + }, + "UpdateProxySession":{ + "name":"UpdateProxySession", + "http":{ + "method":"POST", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", + "responseCode":201 + }, + "input":{"shape":"UpdateProxySessionRequest"}, + "output":{"shape":"UpdateProxySessionResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the specified proxy session details, such as voice or SMS capabilities.

" + }, + "UpdateRoom":{ + "name":"UpdateRoom", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}", + "responseCode":200 + }, + "input":{"shape":"UpdateRoomRequest"}, + "output":{"shape":"UpdateRoomResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates room details, such as the room name, for a room in an Amazon Chime Enterprise account.

" + }, + "UpdateRoomMembership":{ + "name":"UpdateRoomMembership", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + "responseCode":200 + }, + "input":{"shape":"UpdateRoomMembershipRequest"}, + "output":{"shape":"UpdateRoomMembershipResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates room membership details, such as the member role, for a room in an Amazon Chime Enterprise account. The member role designates whether the member is a chat room administrator or a general chat room member. The member role can be updated only for user IDs.

" + }, + "UpdateSipMediaApplication":{ + "name":"UpdateSipMediaApplication", + "http":{ + "method":"PUT", + "requestUri":"/sip-media-applications/{sipMediaApplicationId}", + "responseCode":200 + }, + "input":{"shape":"UpdateSipMediaApplicationRequest"}, + "output":{"shape":"UpdateSipMediaApplicationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the details for the specified SIP media application.

" + }, + "UpdateSipRule":{ + "name":"UpdateSipRule", + "http":{ + "method":"PUT", + "requestUri":"/sip-rules/{sipRuleId}", + "responseCode":202 + }, + "input":{"shape":"UpdateSipRuleRequest"}, + "output":{"shape":"UpdateSipRuleResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the details for the specified SIP rule.

" + }, + "UpdateUser":{ + "name":"UpdateUser", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/users/{userId}", + "responseCode":200 + }, + "input":{"shape":"UpdateUserRequest"}, + "output":{"shape":"UpdateUserResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates user details for a specified user ID. Currently, only LicenseType updates are supported for this action.

" + }, + "UpdateUserSettings":{ + "name":"UpdateUserSettings", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{accountId}/users/{userId}/settings", + "responseCode":204 + }, + "input":{"shape":"UpdateUserSettingsRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the settings for the specified user, such as phone number settings.

" + }, + "UpdateVoiceConnector":{ + "name":"UpdateVoiceConnector", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}", + "responseCode":200 + }, + "input":{"shape":"UpdateVoiceConnectorRequest"}, + "output":{"shape":"UpdateVoiceConnectorResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates details for the specified Amazon Chime Voice Connector.

" + }, + "UpdateVoiceConnectorGroup":{ + "name":"UpdateVoiceConnectorGroup", + "http":{ + "method":"PUT", + "requestUri":"/voice-connector-groups/{voiceConnectorGroupId}", + "responseCode":202 + }, + "input":{"shape":"UpdateVoiceConnectorGroupRequest"}, + "output":{"shape":"UpdateVoiceConnectorGroupResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates details for the specified Amazon Chime Voice Connector group, such as the name and Amazon Chime Voice Connector priority ranking.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

You don't have permissions to perform the requested operation.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "Account":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AccountId", + "Name" + ], + "members":{ + "AwsAccountId":{ + "shape":"String", + "documentation":"

The AWS account ID.

" + }, + "AccountId":{ + "shape":"String", + "documentation":"

The Amazon Chime account ID.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The Amazon Chime account name.

" + }, + "AccountType":{ + "shape":"AccountType", + "documentation":"

The Amazon Chime account type. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" + }, + "CreatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The Amazon Chime account creation timestamp, in ISO 8601 format.

" + }, + "DefaultLicense":{ + "shape":"License", + "documentation":"

The default license for the Amazon Chime account.

" + }, + "SupportedLicenses":{ + "shape":"LicenseList", + "documentation":"

Supported licenses for the Amazon Chime account.

" + }, + "SigninDelegateGroups":{ + "shape":"SigninDelegateGroupList", + "documentation":"

The sign-in delegate groups associated with the account.

" + } + }, + "documentation":"

The Amazon Chime account details. An AWS account can have multiple Amazon Chime accounts.

" + }, + "AccountList":{ + "type":"list", + "member":{"shape":"Account"} + }, + "AccountName":{ + "type":"string", + "max":100, + "min":1, + "pattern":".*\\S.*" + }, + "AccountSettings":{ + "type":"structure", + "members":{ + "DisableRemoteControl":{ + "shape":"Boolean", + "documentation":"

Setting that stops or starts remote control of shared screens during meetings.

" + }, + "EnableDialOut":{ + "shape":"Boolean", + "documentation":"

Setting that allows meeting participants to choose the Call me at a phone number option. For more information, see Join a Meeting without the Amazon Chime App.

" + } + }, + "documentation":"

Settings related to the Amazon Chime account. This includes settings that start or stop remote control of shared screens, or start or stop the dial-out option in the Amazon Chime web application. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + }, + "AccountType":{ + "type":"string", + "enum":[ + "Team", + "EnterpriseDirectory", + "EnterpriseLWA", + "EnterpriseOIDC" + ] + }, + "AlexaForBusinessMetadata":{ + "type":"structure", + "members":{ + "IsAlexaForBusinessEnabled":{ + "shape":"Boolean", + "documentation":"

Starts or stops Alexa for Business.

" + }, + "AlexaForBusinessRoomArn":{ + "shape":"SensitiveString", + "documentation":"

The ARN of the room resource.

" + } + }, + "documentation":"

The Alexa for Business metadata associated with an Amazon Chime user, used to integrate Alexa for Business with a device.

" + }, + "Alpha2CountryCode":{ + "type":"string", + "pattern":"[A-Z]{2}" + }, + "AppInstance":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the messaging instance.

" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of an app instance.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of an app instance.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which an app instance was created. In epoch milliseconds.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time an app instance was last updated. In epoch milliseconds.

" + } + }, + "documentation":"

An instance of a Chime messaging application.

" + }, + "AppInstanceAdmin":{ + "type":"structure", + "members":{ + "Admin":{ + "shape":"Identity", + "documentation":"

The name and metadata of the app instance administrator.

" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance for which the user is an administrator.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which an administrator was created.

" + } + }, + "documentation":"

The details of an app instance administrator.

" + }, + "AppInstanceAdminList":{ + "type":"list", + "member":{"shape":"AppInstanceAdminSummary"} + }, + "AppInstanceAdminSummary":{ + "type":"structure", + "members":{ + "Admin":{ + "shape":"Identity", + "documentation":"

The name and metadata of the app instance administrator.

" + } + }, + "documentation":"

The identity and metadata of an administrator.

" + }, + "AppInstanceDataType":{ + "type":"string", + "enum":[ + "Channel", + "ChannelMessage" + ] + }, + "AppInstanceList":{ + "type":"list", + "member":{"shape":"AppInstanceSummary"} + }, + "AppInstanceRetentionSettings":{ + "type":"structure", + "members":{ + "ChannelRetentionSettings":{ + "shape":"ChannelRetentionSettings", + "documentation":"

The length of time in days to retain a channel.

" + } + }, + "documentation":"

The length of time in days to retain messages.

" + }, + "AppInstanceStreamingConfiguration":{ + "type":"structure", + "required":[ + "AppInstanceDataType", + "ResourceArn" + ], + "members":{ + "AppInstanceDataType":{ + "shape":"AppInstanceDataType", + "documentation":"

The data type of the app instance.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The resource ARN.

" + } + }, + "documentation":"

The streaming configuration of an app instance.

" + }, + "AppInstanceStreamingConfigurationList":{ + "type":"list", + "member":{"shape":"AppInstanceStreamingConfiguration"}, + "max":2, + "min":1 + }, + "AppInstanceSummary":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The app instance ARN.

" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the app instance summary.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the app instance summary.

" + } + }, + "documentation":"

The summary data for an app instance.

" + }, + "AppInstanceUser":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user.

" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The name of the app instance user.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the app instance user was created.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the app instance user.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the app instance user was last updated.

" + } + }, + "documentation":"

The app instance user.

" + }, + "AppInstanceUserList":{ + "type":"list", + "member":{"shape":"AppInstanceUserSummary"} + }, + "AppInstanceUserMembershipSummary":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The type of channel summary.

" + }, + "ReadMarkerTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a summary was last read.

" + } + }, + "documentation":"

Summarizes the channel membership details of an app instance user.

" + }, + "AppInstanceUserSummary":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user.

" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The name in an app instance user summary.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata in an app instance user summary.

" + } + }, + "documentation":"

The app instance user summary data.

" + }, + "AreaCode":{ + "type":"string", + "pattern":"^$|^[0-9]{3,3}$" + }, + "Arn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^arn[\\/\\:\\-\\_\\.a-zA-Z0-9]+$", + "sensitive":true + }, + "AssociatePhoneNumberWithUserRequest":{ + "type":"structure", + "required":[ + "AccountId", + "UserId", + "E164PhoneNumber" + ], + "members":{ + "AccountId":{ + "shape":"String", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "UserId":{ + "shape":"String", + "documentation":"

The user ID.

", + "location":"uri", + "locationName":"userId" + }, + "E164PhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The phone number, in E.164 format.

" + } + } + }, + "AssociatePhoneNumberWithUserResponse":{ + "type":"structure", + "members":{ + } + }, + "AssociatePhoneNumbersWithVoiceConnectorGroupRequest":{ + "type":"structure", + "required":[ + "VoiceConnectorGroupId", + "E164PhoneNumbers" + ], + "members":{ + "VoiceConnectorGroupId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector group ID.

", + "location":"uri", + "locationName":"voiceConnectorGroupId" + }, + "E164PhoneNumbers":{ + "shape":"E164PhoneNumberList", + "documentation":"

List of phone numbers, in E.164 format.

" + }, + "ForceAssociate":{ + "shape":"NullableBoolean", + "documentation":"

If true, associates the provided phone numbers with the provided Amazon Chime Voice Connector Group and removes any previously existing associations. If false, does not associate any phone numbers that have previously existing associations.

" + } + } + }, + "AssociatePhoneNumbersWithVoiceConnectorGroupResponse":{ + "type":"structure", + "members":{ + "PhoneNumberErrors":{ + "shape":"PhoneNumberErrorList", + "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + } + } + }, + "AssociatePhoneNumbersWithVoiceConnectorRequest":{ + "type":"structure", + "required":[ + "VoiceConnectorId", + "E164PhoneNumbers" + ], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "E164PhoneNumbers":{ + "shape":"E164PhoneNumberList", + "documentation":"

List of phone numbers, in E.164 format.

" + }, + "ForceAssociate":{ + "shape":"NullableBoolean", + "documentation":"

If true, associates the provided phone numbers with the provided Amazon Chime Voice Connector and removes any previously existing associations. If false, does not associate any phone numbers that have previously existing associations.

" + } + } + }, + "AssociatePhoneNumbersWithVoiceConnectorResponse":{ + "type":"structure", + "members":{ + "PhoneNumberErrors":{ + "shape":"PhoneNumberErrorList", + "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + } + } + }, + "AssociateSigninDelegateGroupsWithAccountRequest":{ + "type":"structure", + "required":[ + "AccountId", + "SigninDelegateGroups" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "SigninDelegateGroups":{ + "shape":"SigninDelegateGroupList", + "documentation":"

The sign-in delegate groups.

" + } + } + }, + "AssociateSigninDelegateGroupsWithAccountResponse":{ + "type":"structure", + "members":{ + } + }, + "Attendee":{ + "type":"structure", + "members":{ + "ExternalUserId":{ + "shape":"ExternalUserIdType", + "documentation":"

The Amazon Chime SDK external user ID. An idempotency token. Links the attendee to an identity managed by a builder application. If you create an attendee with the same external user ID, the service returns the existing record.

" + }, + "AttendeeId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK attendee ID.

" + }, + "JoinToken":{ + "shape":"JoinTokenString", + "documentation":"

The join token used by the Amazon Chime SDK attendee.

" + } + }, + "documentation":"

An Amazon Chime SDK meeting attendee. Includes a unique AttendeeId and JoinToken. The JoinToken allows a client to authenticate and join as the specified attendee. The JoinToken expires when the meeting ends or when DeleteAttendee is called. After that, the attendee is unable to join the meeting.

We recommend securely transferring each JoinToken from your server application to the client so that no other client has access to the token except for the one authorized to represent the attendee.
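A minimal server-side sketch of that recommendation, using the Attendee accessors confirmed above; how the caller's external user ID is established, and how the token is transported to the client, are application-specific assumptions.

    import software.amazon.awssdk.services.chime.model.Attendee;
    import software.amazon.awssdk.services.chime.model.BatchCreateAttendeeResponse;

    public class JoinTokenHelper {
        // Return only the join token that belongs to the authenticated caller,
        // so no other client ever sees another attendee's token.
        static String joinTokenFor(String callerExternalUserId, BatchCreateAttendeeResponse response) {
            return response.attendees().stream()
                    .filter(a -> callerExternalUserId.equals(a.externalUserId()))
                    .map(Attendee::joinToken)
                    .findFirst()
                    .orElseThrow(() -> new IllegalArgumentException("no attendee for caller"));
        }
    }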

" + }, + "AttendeeList":{ + "type":"list", + "member":{"shape":"Attendee"} + }, + "AttendeeTagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":10, + "min":1 + }, + "AttendeeTagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":10, + "min":1 + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The input parameters don't match the service's restrictions.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "BatchCreateAttendeeErrorList":{ + "type":"list", + "member":{"shape":"CreateAttendeeError"} + }, + "BatchCreateAttendeeRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "Attendees" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + }, + "Attendees":{ + "shape":"CreateAttendeeRequestItemList", + "documentation":"

The request containing the attendees to create.

" + } + } + }, + "BatchCreateAttendeeResponse":{ + "type":"structure", + "members":{ + "Attendees":{ + "shape":"AttendeeList", + "documentation":"

The attendee information, including attendee IDs and join tokens.

" + }, + "Errors":{ + "shape":"BatchCreateAttendeeErrorList", + "documentation":"

If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.

" + } + } + }, + "BatchCreateRoomMembershipRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId", + "MembershipItemList" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "documentation":"

The room ID.

", + "location":"uri", + "locationName":"roomId" + }, + "MembershipItemList":{ + "shape":"MembershipItemList", + "documentation":"

The list of membership items.

" + } + } + }, + "BatchCreateRoomMembershipResponse":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"MemberErrorList", + "documentation":"

If the action fails for one or more of the member IDs in the request, a list of the member IDs is returned, along with error codes and error messages.

" + } + } + }, + "BatchDeletePhoneNumberRequest":{ + "type":"structure", + "required":["PhoneNumberIds"], + "members":{ + "PhoneNumberIds":{ + "shape":"NonEmptyStringList", + "documentation":"

List of phone number IDs.

" + } + } + }, + "BatchDeletePhoneNumberResponse":{ + "type":"structure", + "members":{ + "PhoneNumberErrors":{ + "shape":"PhoneNumberErrorList", + "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + } + } + }, + "BatchSuspendUserRequest":{ + "type":"structure", + "required":[ + "AccountId", + "UserIdList" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "UserIdList":{ + "shape":"UserIdList", + "documentation":"

The request containing the user IDs to suspend.

" + } + } + }, + "BatchSuspendUserResponse":{ + "type":"structure", + "members":{ + "UserErrors":{ + "shape":"UserErrorList", + "documentation":"

If the BatchSuspendUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

" + } + } + }, + "BatchUnsuspendUserRequest":{ + "type":"structure", + "required":[ + "AccountId", + "UserIdList" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "UserIdList":{ + "shape":"UserIdList", + "documentation":"

The request containing the user IDs to unsuspend.

" + } + } + }, + "BatchUnsuspendUserResponse":{ + "type":"structure", + "members":{ + "UserErrors":{ + "shape":"UserErrorList", + "documentation":"

If the BatchUnsuspendUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

" + } + } + }, + "BatchUpdatePhoneNumberRequest":{ + "type":"structure", + "required":["UpdatePhoneNumberRequestItems"], + "members":{ + "UpdatePhoneNumberRequestItems":{ + "shape":"UpdatePhoneNumberRequestItemList", + "documentation":"

The request containing the phone number IDs and product types or calling names to update.

" + } + } + }, + "BatchUpdatePhoneNumberResponse":{ + "type":"structure", + "members":{ + "PhoneNumberErrors":{ + "shape":"PhoneNumberErrorList", + "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + } + } + }, + "BatchUpdateUserRequest":{ + "type":"structure", + "required":[ + "AccountId", + "UpdateUserRequestItems" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "UpdateUserRequestItems":{ + "shape":"UpdateUserRequestItemList", + "documentation":"

The request containing the user IDs and details to update.

" + } + } + }, + "BatchUpdateUserResponse":{ + "type":"structure", + "members":{ + "UserErrors":{ + "shape":"UserErrorList", + "documentation":"

If the BatchUpdateUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

" + } + } + }, + "Boolean":{"type":"boolean"}, + "Bot":{ + "type":"structure", + "members":{ + "BotId":{ + "shape":"String", + "documentation":"

The bot ID.

" + }, + "UserId":{ + "shape":"String", + "documentation":"

The unique ID for the bot user.

" + }, + "DisplayName":{ + "shape":"SensitiveString", + "documentation":"

The bot display name.

" + }, + "BotType":{ + "shape":"BotType", + "documentation":"

The bot type.

" + }, + "Disabled":{ + "shape":"NullableBoolean", + "documentation":"

When true, the bot is stopped from running in your account.

" + }, + "CreatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The bot creation timestamp, in ISO 8601 format.

" + }, + "UpdatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The updated bot timestamp, in ISO 8601 format.

" + }, + "BotEmail":{ + "shape":"SensitiveString", + "documentation":"

The bot email address.

" + }, + "SecurityToken":{ + "shape":"SensitiveString", + "documentation":"

The security token used to authenticate Amazon Chime with the outgoing event endpoint.

" + } + }, + "documentation":"

A resource that allows Enterprise account administrators to configure an interface to receive events from Amazon Chime.

" + }, + "BotList":{ + "type":"list", + "member":{"shape":"Bot"} + }, + "BotType":{ + "type":"string", + "enum":["ChatBot"] + }, + "BusinessCallingSettings":{ + "type":"structure", + "members":{ + "CdrBucket":{ + "shape":"String", + "documentation":"

The Amazon S3 bucket designated for call detail record storage.

", + "box":true + } + }, + "documentation":"

The Amazon Chime Business Calling settings for the administrator's AWS account. Includes any Amazon S3 buckets designated for storing call detail records.

" + }, + "CallingName":{ + "type":"string", + "pattern":"^$|^[a-zA-Z0-9 ]{2,15}$", + "sensitive":true + }, + "CallingNameStatus":{ + "type":"string", + "enum":[ + "Unassigned", + "UpdateInProgress", + "UpdateSucceeded", + "UpdateFailed" + ] + }, + "CallingRegion":{"type":"string"}, + "CallingRegionList":{ + "type":"list", + "member":{"shape":"CallingRegion"} + }, + "Capability":{ + "type":"string", + "enum":[ + "Voice", + "SMS" + ] + }, + "CapabilityList":{ + "type":"list", + "member":{"shape":"Capability"} + }, + "Channel":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the channel.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The mode of the channel.

" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The channel's privacy setting, PUBLIC or HIDDEN.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the channel.

" + }, + "CreatedBy":{ + "shape":"Identity", + "documentation":"

The administrator who created the channel.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the administrator created the channel.

" + }, + "LastMessageTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a member sent the last message in a session.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a channel was last updated.

" + } + }, + "documentation":"

The details of a channel.

" + }, + "ChannelBan":{ + "type":"structure", + "members":{ + "Member":{ + "shape":"Identity", + "documentation":"

The member being banned from the channel.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which a member is being banned.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the ban was created.

" + }, + "CreatedBy":{ + "shape":"Identity", + "documentation":"

The member who created the ban.

" + } + }, + "documentation":"

The details of a channel ban.

" + }, + "ChannelBanSummary":{ + "type":"structure", + "members":{ + "Member":{ + "shape":"Identity", + "documentation":"

The member being banned from a channel.

" + } + }, + "documentation":"

The summary data for the channel ban.

" + }, + "ChannelBanSummaryList":{ + "type":"list", + "member":{"shape":"ChannelBanSummary"} + }, + "ChannelMembership":{ + "type":"structure", + "members":{ + "InvitedBy":{ + "shape":"Identity", + "documentation":"

The identifier of the member who invited another member. Taken from the message header.

" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership type set for the channel member.

" + }, + "Member":{ + "shape":"Identity", + "documentation":"

The data of the channel member.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member's channel.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the channel membership was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a channel membership was last updated.

" + } + }, + "documentation":"

The details of a channel member.

" + }, + "ChannelMembershipForAppInstanceUserSummary":{ + "type":"structure", + "members":{ + "ChannelSummary":{"shape":"ChannelSummary"}, + "AppInstanceUserMembershipSummary":{ + "shape":"AppInstanceUserMembershipSummary", + "documentation":"

Returns the channel membership data for an app instance.

" + } + }, + "documentation":"

Returns the channel membership summary data for an app instance.

" + }, + "ChannelMembershipForAppInstanceUserSummaryList":{ + "type":"list", + "member":{"shape":"ChannelMembershipForAppInstanceUserSummary"} + }, + "ChannelMembershipSummary":{ + "type":"structure", + "members":{ + "Member":{ + "shape":"Identity", + "documentation":"

A member's summary data.

" + } + }, + "documentation":"

The summary data of a channel membership.

" + }, + "ChannelMembershipSummaryList":{ + "type":"list", + "member":{"shape":"ChannelMembershipSummary"} + }, + "ChannelMembershipType":{ + "type":"string", + "enum":[ + "DEFAULT", + "HIDDEN" + ] + }, + "ChannelMessage":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of a message.

" + }, + "Content":{ + "shape":"Content", + "documentation":"

The message content.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The message metadata.

" + }, + "Type":{ + "shape":"ChannelMessageType", + "documentation":"

The message type.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the message was created.

" + }, + "LastEditedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a message was edited.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which a message was updated.

" + }, + "Sender":{ + "shape":"Identity", + "documentation":"

The message sender.

" + }, + "Redacted":{ + "shape":"NonNullableBoolean", + "documentation":"

Hides the content of a message. The message still exists on the back end, but this action only returns metadata.

" + }, + "Persistence":{"shape":"ChannelMessagePersistenceType"} + }, + "documentation":"

The details of a message in a channel.

" + }, + "ChannelMessagePersistenceType":{ + "type":"string", + "enum":[ + "PERSISTENT", + "NON_PERSISTENT" + ] + }, + "ChannelMessageSummary":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message summary.

" + }, + "Content":{ + "shape":"Content", + "documentation":"

The content of the message summary.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the message summary.

" + }, + "Type":{ + "shape":"ChannelMessageType", + "documentation":"

The type of message summary.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the message summary was created.

" + }, + "LastUpdatedTimestamp":{"shape":"Timestamp"}, + "LastEditedTimestamp":{"shape":"Timestamp"}, + "Sender":{ + "shape":"Identity", + "documentation":"

The sender of the message summary.

" + }, + "Redacted":{ + "shape":"NonNullableBoolean", + "documentation":"

Redacts the content of a message summary.

" + } + }, + "documentation":"

A summary of the messages in a channel.

" + }, + "ChannelMessageSummaryList":{ + "type":"list", + "member":{"shape":"ChannelMessageSummary"} + }, + "ChannelMessageType":{ + "type":"string", + "enum":[ + "STANDARD", + "CONTROL" + ] + }, + "ChannelMode":{ + "type":"string", + "enum":[ + "UNRESTRICTED", + "RESTRICTED" + ] + }, + "ChannelModeratedByAppInstanceUserSummary":{ + "type":"structure", + "members":{ + "ChannelSummary":{"shape":"ChannelSummary"} + }, + "documentation":"

Returns the summary data for a moderated channel.

" + }, + "ChannelModeratedByAppInstanceUserSummaryList":{ + "type":"list", + "member":{"shape":"ChannelModeratedByAppInstanceUserSummary"} + }, + "ChannelModerator":{ + "type":"structure", + "members":{ + "Moderator":{ + "shape":"Identity", + "documentation":"

The moderator's data.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderator's channel.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the moderator was created.

" + }, + "CreatedBy":{ + "shape":"Identity", + "documentation":"

The member who created the moderator.

" + } + }, + "documentation":"

The details of a channel moderator.

" + }, + "ChannelModeratorSummary":{ + "type":"structure", + "members":{ + "Moderator":{ + "shape":"Identity", + "documentation":"

The data for a moderator.

" + } + }, + "documentation":"

Summary data of the moderators in a channel.

" + }, + "ChannelModeratorSummaryList":{ + "type":"list", + "member":{"shape":"ChannelModeratorSummary"} + }, + "ChannelPrivacy":{ + "type":"string", + "enum":[ + "PUBLIC", + "PRIVATE" + ] + }, + "ChannelRetentionSettings":{ + "type":"structure", + "members":{ + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The time in days to retain a channel.

" + } + }, + "documentation":"

The retention settings for a channel.

" + }, + "ChannelSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the channel.

" + }, + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel summary.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The summary mode of the channel.

" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The privacy setting of the channel being summarized: PUBLIC or PRIVATE.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the channel summary.

" + }, + "LastMessageTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the last message in a channel was sent.

" + } + }, + "documentation":"

The summary data for a channel.

" + }, + "ChannelSummaryList":{ + "type":"list", + "member":{"shape":"ChannelSummary"} + }, + "ChimeArn":{ + "type":"string", + "max":1600, + "min":5, + "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[-_a-zA-Z0-9]*", + "sensitive":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The request could not be processed because of conflict in the current state of the resource.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "Content":{ + "type":"string", + "max":4096, + "min":0, + "pattern":"[\\s\\S]*", + "sensitive":true + }, + "ConversationRetentionSettings":{ + "type":"structure", + "members":{ + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days for which to retain chat conversation messages.

" + } + }, + "documentation":"

The retention settings that determine how long to retain chat conversation messages for an Amazon Chime Enterprise account.

" + }, + "Country":{ + "type":"string", + "pattern":"^$|^[A-Z]{2,2}$" + }, + "CountryList":{ + "type":"list", + "member":{"shape":"Country"}, + "max":100, + "min":1 + }, + "CpsLimit":{ + "type":"integer", + "min":1 + }, + "CreateAccountRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"AccountName", + "documentation":"

The name of the Amazon Chime account.

" + } + } + }, + "CreateAccountResponse":{ + "type":"structure", + "members":{ + "Account":{"shape":"Account"} + } + }, + "CreateAppInstanceAdminRequest":{ + "type":"structure", + "required":[ + "AppInstanceAdminArn", + "AppInstanceArn" + ], + "members":{ + "AppInstanceAdminArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the administrator of the current app instance.

" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } + }, + "CreateAppInstanceAdminResponse":{ + "type":"structure", + "members":{ + "AppInstanceAdmin":{ + "shape":"Identity", + "documentation":"

The name and ARN of the admin for the app instance.

" + }, + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance for which the admin was created.

" + } + } + }, + "CreateAppInstanceRequest":{ + "type":"structure", + "required":[ + "Name", + "ClientRequestToken" + ], + "members":{ + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the app instance.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the app instance. Limited to a 1KB string in UTF-8.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The ClientRequestToken of the app instance.

", + "idempotencyToken":true + } + } + }, + "CreateAppInstanceResponse":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The Amazon Resource Number (ARN) of the app instance.

" + } + } + }, + "CreateAppInstanceUserRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "AppInstanceUserId", + "Name", + "ClientRequestToken" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance request.

" + }, + "AppInstanceUserId":{ + "shape":"UserId", + "documentation":"

The user ID of the app instance.

" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The user's name.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The request's metadata. Limited to a 1KB string in UTF-8.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The token assigned to the user requesting an app instance.

", + "idempotencyToken":true + } + } + }, + "CreateAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The user's ARN.

" + } + } + }, + "CreateAttendeeError":{ + "type":"structure", + "members":{ + "ExternalUserId":{ + "shape":"ExternalUserIdType", + "documentation":"

The Amazon Chime SDK external user ID. An idempotency token. Links the attendee to an identity managed by a builder application. If you create an attendee with the same external user id, the service returns the existing record.

" + }, + "ErrorCode":{ + "shape":"String", + "documentation":"

The error code.

" + }, + "ErrorMessage":{ + "shape":"String", + "documentation":"

The error message.

" + } + }, + "documentation":"

The list of errors returned when errors are encountered during the BatchCreateAttendee and CreateAttendee actions. This includes external user IDs, error codes, and error messages.

" + }, + "CreateAttendeeRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "ExternalUserId" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + }, + "ExternalUserId":{ + "shape":"ExternalUserIdType", + "documentation":"

The Amazon Chime SDK external user ID. An idempotency token. Links the attendee to an identity managed by a builder application. If you create an attendee with the same external user id, the service returns the existing record.

" + }, + "Tags":{ + "shape":"AttendeeTagList", + "documentation":"

The tag key-value pairs.

" + } + } + }, + "CreateAttendeeRequestItem":{ + "type":"structure", + "required":["ExternalUserId"], + "members":{ + "ExternalUserId":{ + "shape":"ExternalUserIdType", + "documentation":"

The Amazon Chime SDK external user ID. An idempotency token. Links the attendee to an identity managed by a builder application. If you create an attendee with the same external user id, the service returns the existing record.

" + }, + "Tags":{ + "shape":"AttendeeTagList", + "documentation":"

The tag key-value pairs.

" + } + }, + "documentation":"

The Amazon Chime SDK attendee fields to create, used with the BatchCreateAttendee action.

" + }, + "CreateAttendeeRequestItemList":{ + "type":"list", + "member":{"shape":"CreateAttendeeRequestItem"} + }, + "CreateAttendeeResponse":{ + "type":"structure", + "members":{ + "Attendee":{ + "shape":"Attendee", + "documentation":"

The attendee information, including attendee ID and join token.

" + } + } + }, + "CreateBotRequest":{ + "type":"structure", + "required":[ + "DisplayName", + "AccountId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "DisplayName":{ + "shape":"SensitiveString", + "documentation":"

The bot display name.

" + }, + "Domain":{ + "shape":"NonEmptyString", + "documentation":"

The domain of the Amazon Chime Enterprise account.

" + } + } + }, + "CreateBotResponse":{ + "type":"structure", + "members":{ + "Bot":{ + "shape":"Bot", + "documentation":"

The bot details.

" + } + } + }, + "CreateChannelBanRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the ban request.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member being banned.

" + } + } + }, + "CreateChannelBanResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the response to the ban request.

" + }, + "Member":{ + "shape":"Identity", + "documentation":"

The ChannelArn and BannedIdentity of the member in the ban response.

" + } + } + }, + "CreateChannelMembershipRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn", + "Type" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel to which you're adding users.

", + "location":"uri", + "locationName":"channelArn" + }, + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member you want to add to the channel.

" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership type of a user, DEFAULT or HIDDEN. Default members are always returned as part of ListChannelMemberships. Hidden members are only returned if the type filter in ListChannelMemberships equals HIDDEN. Otherwise hidden members are not returned. This is only supported by moderators.

" + } + } + }, + "CreateChannelMembershipResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "Member":{ + "shape":"Identity", + "documentation":"

The ARN and metadata of the member being added.

" + } + } + }, + "CreateChannelModeratorRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChannelModeratorArn" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "ChannelModeratorArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderator.

" + } + } + }, + "CreateChannelModeratorResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "ChannelModerator":{ + "shape":"Identity", + "documentation":"

The ARNs of the channel and the moderator.

" + } + } + }, + "CreateChannelRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "Name", + "ClientRequestToken" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel request.

" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the channel.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The channel mode: UNRESTRICTED or RESTRICTED. Administrators, moderators, and channel members can add themselves and other members to unrestricted channels. Only administrators and moderators can add members to restricted channels.

" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The channel's privacy level: PUBLIC or PRIVATE. Private channels aren't discoverable by users outside the channel. Public channels are discoverable by anyone in the app instance.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the creation request. Limited to 1KB and UTF-8.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The client token for the request. An Idempotency token.

", + "idempotencyToken":true + }, + "Tags":{"shape":"TagList"} + } + }, + "CreateChannelResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + } + } + }, + "CreateMeetingDialOutRequest":{ + "type":"structure", + "required":[ + "FromPhoneNumber", + "ToPhoneNumber", + "JoinToken", + "MeetingId" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

Type: String

Pattern: [a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}

Required: No

", + "location":"uri", + "locationName":"meetingId" + }, + "FromPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

Phone number used as the caller ID when the remote party receives a call.

" + }, + "ToPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

Phone number called when inviting someone to a meeting.

" + }, + "JoinToken":{ + "shape":"JoinTokenString", + "documentation":"

Token used by the Amazon Chime SDK attendee. Call the CreateAttendee API to get a join token.

" + } + } + }, + "CreateMeetingDialOutResponse":{ + "type":"structure", + "members":{ + "TransactionId":{ + "shape":"GuidString", + "documentation":"

Unique ID that tracks API calls.

" + } + } + }, + "CreateMeetingRequest":{ + "type":"structure", + "required":["ClientRequestToken"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for the client request. Use a different token for different meetings.

", + "idempotencyToken":true + }, + "ExternalMeetingId":{ + "shape":"ExternalMeetingIdType", + "documentation":"

The external meeting ID.

" + }, + "MeetingHostId":{ + "shape":"ExternalUserIdType", + "documentation":"

Reserved.

" + }, + "MediaRegion":{ + "shape":"String", + "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: af-south-1, ap-northeast-1, ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" + }, + "Tags":{ + "shape":"MeetingTagList", + "documentation":"

The tag key-value pairs.

" + }, + "NotificationsConfiguration":{ + "shape":"MeetingNotificationConfiguration", + "documentation":"

The configuration for resource targets to receive notifications when meeting and attendee events occur.

" + } + } + }, + "CreateMeetingResponse":{ + "type":"structure", + "members":{ + "Meeting":{ + "shape":"Meeting", + "documentation":"

The meeting information, including the meeting ID and MediaPlacement.

" + } + } + }, + "CreateMeetingWithAttendeesRequest":{ + "type":"structure", + "required":["ClientRequestToken"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for the client request. Use a different token for different meetings.

", + "idempotencyToken":true + }, + "ExternalMeetingId":{ + "shape":"ExternalMeetingIdType", + "documentation":"

The external meeting ID.

" + }, + "MeetingHostId":{ + "shape":"ExternalUserIdType", + "documentation":"

Reserved.

" + }, + "MediaRegion":{ + "shape":"String", + "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: af-south-1, ap-northeast-1, ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" + }, + "Tags":{ + "shape":"MeetingTagList", + "documentation":"

The tag key-value pairs.

" + }, + "NotificationsConfiguration":{"shape":"MeetingNotificationConfiguration"}, + "Attendees":{ + "shape":"CreateMeetingWithAttendeesRequestItemList", + "documentation":"

The request containing the attendees to create.

" + } + } + }, + "CreateMeetingWithAttendeesRequestItemList":{ + "type":"list", + "member":{"shape":"CreateAttendeeRequestItem"}, + "max":10, + "min":1 + }, + "CreateMeetingWithAttendeesResponse":{ + "type":"structure", + "members":{ + "Meeting":{"shape":"Meeting"}, + "Attendees":{ + "shape":"AttendeeList", + "documentation":"

The attendee information, including attendees IDs and join tokens.

" + }, + "Errors":{ + "shape":"BatchCreateAttendeeErrorList", + "documentation":"

If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.

" + } + } + }, + "CreatePhoneNumberOrderRequest":{ + "type":"structure", + "required":[ + "ProductType", + "E164PhoneNumbers" + ], + "members":{ + "ProductType":{ + "shape":"PhoneNumberProductType", + "documentation":"

The phone number product type.

" + }, + "E164PhoneNumbers":{ + "shape":"E164PhoneNumberList", + "documentation":"

List of phone numbers, in E.164 format.

" + } + } + }, + "CreatePhoneNumberOrderResponse":{ "type":"structure", "members":{ - "Code":{"shape":"ErrorCode"}, - "Message":{"shape":"String"} - }, - "documentation":"

You don't have permissions to perform the requested operation.

", - "error":{"httpStatusCode":403}, - "exception":true + "PhoneNumberOrder":{ + "shape":"PhoneNumberOrder", + "documentation":"

The phone number order details.

" + } + } }, - "Account":{ + "CreateProxySessionRequest":{ "type":"structure", "required":[ - "AwsAccountId", - "AccountId", - "Name" + "ParticipantPhoneNumbers", + "Capabilities", + "VoiceConnectorId" ], "members":{ - "AwsAccountId":{ - "shape":"String", - "documentation":"

The AWS account ID.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" }, - "AccountId":{ - "shape":"String", - "documentation":"

The Amazon Chime account ID.

" + "ParticipantPhoneNumbers":{ + "shape":"ParticipantPhoneNumberList", + "documentation":"

The participant phone numbers.

" }, "Name":{ - "shape":"String", - "documentation":"

The Amazon Chime account name.

" + "shape":"ProxySessionNameString", + "documentation":"

The name of the proxy session.

" }, - "AccountType":{ - "shape":"AccountType", - "documentation":"

The Amazon Chime account type. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" + "ExpiryMinutes":{ + "shape":"PositiveInteger", + "documentation":"

The number of minutes allowed for the proxy session.

" }, - "CreatedTimestamp":{ - "shape":"Iso8601Timestamp", - "documentation":"

The Amazon Chime account creation timestamp, in ISO 8601 format.

" + "Capabilities":{ + "shape":"CapabilityList", + "documentation":"

The proxy session capabilities.

" }, - "DefaultLicense":{ - "shape":"License", - "documentation":"

The default license for the Amazon Chime account.

" + "NumberSelectionBehavior":{ + "shape":"NumberSelectionBehavior", + "documentation":"

The preference for proxy phone number reuse, or stickiness, between the same participants across sessions.

" }, - "SupportedLicenses":{ - "shape":"LicenseList", - "documentation":"

Supported licenses for the Amazon Chime account.

" + "GeoMatchLevel":{ + "shape":"GeoMatchLevel", + "documentation":"

The preference for matching the country or area code of the proxy phone number with that of the first participant.

" }, - "SigninDelegateGroups":{ - "shape":"SigninDelegateGroupList", - "documentation":"

The sign-in delegate groups associated with the account.

" + "GeoMatchParams":{ + "shape":"GeoMatchParams", + "documentation":"

The country and area code for the proxy phone number.

" } - }, - "documentation":"

The Amazon Chime account details. An AWS account can have multiple Amazon Chime accounts.

" - }, - "AccountList":{ - "type":"list", - "member":{"shape":"Account"} + } }, - "AccountName":{ - "type":"string", - "max":100, - "min":1, - "pattern":".*\\S.*" + "CreateProxySessionResponse":{ + "type":"structure", + "members":{ + "ProxySession":{ + "shape":"ProxySession", + "documentation":"

The proxy session details.

" + } + } }, - "AccountSettings":{ + "CreateRoomMembershipRequest":{ "type":"structure", + "required":[ + "AccountId", + "RoomId", + "MemberId" + ], "members":{ - "DisableRemoteControl":{ - "shape":"Boolean", - "documentation":"

Setting that stops or starts remote control of shared screens during meetings.

" + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" }, - "EnableDialOut":{ - "shape":"Boolean", - "documentation":"

Setting that allows meeting participants to choose the Call me at a phone number option. For more information, see Join a Meeting without the Amazon Chime App.

" + "RoomId":{ + "shape":"NonEmptyString", + "documentation":"

The room ID.

", + "location":"uri", + "locationName":"roomId" + }, + "MemberId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime member ID (user ID or bot ID).

" + }, + "Role":{ + "shape":"RoomMembershipRole", + "documentation":"

The role of the member.

" } - }, - "documentation":"

Settings related to the Amazon Chime account. This includes settings that start or stop remote control of shared screens, or start or stop the dial-out option in the Amazon Chime web application. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

" + } }, - "AccountType":{ - "type":"string", - "enum":[ - "Team", - "EnterpriseDirectory", - "EnterpriseLWA", - "EnterpriseOIDC" - ] + "CreateRoomMembershipResponse":{ + "type":"structure", + "members":{ + "RoomMembership":{ + "shape":"RoomMembership", + "documentation":"

The room membership details.

" + } + } }, - "AlexaForBusinessMetadata":{ + "CreateRoomRequest":{ "type":"structure", + "required":[ + "AccountId", + "Name" + ], "members":{ - "IsAlexaForBusinessEnabled":{ - "shape":"Boolean", - "documentation":"

Starts or stops Alexa for Business.

" + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" }, - "AlexaForBusinessRoomArn":{ + "Name":{ "shape":"SensitiveString", - "documentation":"

The ARN of the room resource.

" + "documentation":"

The room name.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The idempotency token for the request.

", + "idempotencyToken":true } - }, - "documentation":"

The Alexa for Business metadata associated with an Amazon Chime user, used to integrate Alexa for Business with a device.

" + } }, - "Alpha2CountryCode":{ - "type":"string", - "pattern":"[A-Z]{2}" + "CreateRoomResponse":{ + "type":"structure", + "members":{ + "Room":{ + "shape":"Room", + "documentation":"

The room details.

" + } + } }, - "AreaCode":{ - "type":"string", - "pattern":"^$|^[0-9]{3,3}$" + "CreateSipMediaApplicationCallRequest":{ + "type":"structure", + "required":["SipMediaApplicationId"], + "members":{ + "FromPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The phone number that a user calls from.

" + }, + "ToPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The phone number that the user dials in order to connect to a meeting.

" + }, + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the SIP media application.

", + "location":"uri", + "locationName":"sipMediaApplicationId" + } + } }, - "Arn":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"^arn[\\/\\:\\-\\_\\.a-zA-Z0-9]+$", - "sensitive":true + "CreateSipMediaApplicationCallResponse":{ + "type":"structure", + "members":{ + "SipMediaApplicationCall":{ + "shape":"SipMediaApplicationCall", + "documentation":"

The actual call.

" + } + } }, - "AssociatePhoneNumberWithUserRequest":{ + "CreateSipMediaApplicationRequest":{ "type":"structure", "required":[ - "AccountId", - "UserId", - "E164PhoneNumber" + "AwsRegion", + "Endpoints" ], "members":{ - "AccountId":{ + "AwsRegion":{ "shape":"String", + "documentation":"

AWS Region assigned to the SIP media application.

" + }, + "Name":{ + "shape":"SipMediaApplicationName", + "documentation":"

The SIP media application name.

" + }, + "Endpoints":{ + "shape":"SipMediaApplicationEndpointList", + "documentation":"

List of endpoints (Lambda Amazon Resource Names) specified for the SIP media application. Currently, only one endpoint is supported.

" + } + } + }, + "CreateSipMediaApplicationResponse":{ + "type":"structure", + "members":{ + "SipMediaApplication":{ + "shape":"SipMediaApplication", + "documentation":"

The SIP media application details.

" + } + } + }, + "CreateSipRuleRequest":{ + "type":"structure", + "required":[ + "Name", + "TriggerType", + "TriggerValue", + "TargetApplications" + ], + "members":{ + "Name":{ + "shape":"SipRuleName", + "documentation":"

The name of the SIP rule.

" + }, + "TriggerType":{ + "shape":"SipRuleTriggerType", + "documentation":"

The type of trigger whose value is assigned to the SIP rule in TriggerValue. Allowed trigger values are RequestUriHostname and ToPhoneNumber.

" + }, + "TriggerValue":{ + "shape":"NonEmptyString", + "documentation":"

If TriggerType is RequestUriHostname, then the value can be the outbound hostname of an Amazon Chime Voice Connector. If TriggerType is ToPhoneNumber, then the value can be a customer-owned phone number in E164 format. The SipRule is triggered if the SIP application requests the specified hostname, or if a ToPhoneNumber value matches the incoming SIP request.

" + }, + "Disabled":{ + "shape":"NullableBoolean", + "documentation":"

Enables or disables a rule. You must disable rules before you can delete them.

" + }, + "TargetApplications":{ + "shape":"SipRuleTargetApplicationList", + "documentation":"

List of SIP media applications with priority and AWS Region. Only one SIP application per AWS Region can be used.

" + } + } + }, + "CreateSipRuleResponse":{ + "type":"structure", + "members":{ + "SipRule":{ + "shape":"SipRule", + "documentation":"

Returns the SIP rule information, including the rule ID, triggers, and target applications.

" + } + } + }, + "CreateUserRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" }, - "UserId":{ + "Username":{ "shape":"String", - "documentation":"

The user ID.

", - "location":"uri", - "locationName":"userId" + "documentation":"

The user name.

" + }, + "Email":{ + "shape":"EmailAddress", + "documentation":"

The user's email address.

" }, - "E164PhoneNumber":{ - "shape":"E164PhoneNumber", - "documentation":"

The phone number, in E.164 format.

" + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

" } } }, - "AssociatePhoneNumberWithUserResponse":{ + "CreateUserResponse":{ "type":"structure", "members":{ + "User":{"shape":"User"} } }, - "AssociatePhoneNumbersWithVoiceConnectorGroupRequest":{ + "CreateVoiceConnectorGroupRequest":{ "type":"structure", - "required":[ - "VoiceConnectorGroupId", - "E164PhoneNumbers" - ], + "required":["Name"], "members":{ - "VoiceConnectorGroupId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector group ID.

", - "location":"uri", - "locationName":"voiceConnectorGroupId" - }, - "E164PhoneNumbers":{ - "shape":"E164PhoneNumberList", - "documentation":"

List of phone numbers, in E.164 format.

" + "Name":{ + "shape":"VoiceConnectorGroupName", + "documentation":"

The name of the Amazon Chime Voice Connector group.

" }, - "ForceAssociate":{ - "shape":"NullableBoolean", - "documentation":"

If true, associates the provided phone numbers with the provided Amazon Chime Voice Connector Group and removes any previously existing associations. If false, does not associate any phone numbers that have previously existing associations.

" + "VoiceConnectorItems":{ + "shape":"VoiceConnectorItemList", + "documentation":"

The Amazon Chime Voice Connectors to route inbound calls to.

" } } }, - "AssociatePhoneNumbersWithVoiceConnectorGroupResponse":{ + "CreateVoiceConnectorGroupResponse":{ "type":"structure", "members":{ - "PhoneNumberErrors":{ - "shape":"PhoneNumberErrorList", - "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + "VoiceConnectorGroup":{ + "shape":"VoiceConnectorGroup", + "documentation":"

The Amazon Chime Voice Connector group details.

" } } }, - "AssociatePhoneNumbersWithVoiceConnectorRequest":{ + "CreateVoiceConnectorRequest":{ "type":"structure", "required":[ - "VoiceConnectorId", - "E164PhoneNumbers" + "Name", + "RequireEncryption" ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" + "Name":{ + "shape":"VoiceConnectorName", + "documentation":"

The name of the Amazon Chime Voice Connector.

" }, - "E164PhoneNumbers":{ - "shape":"E164PhoneNumberList", - "documentation":"

List of phone numbers, in E.164 format.

" + "AwsRegion":{ + "shape":"VoiceConnectorAwsRegion", + "documentation":"

The AWS Region in which the Amazon Chime Voice Connector is created. Default value: us-east-1.

" }, - "ForceAssociate":{ - "shape":"NullableBoolean", - "documentation":"

If true, associates the provided phone numbers with the provided Amazon Chime Voice Connector and removes any previously existing associations. If false, does not associate any phone numbers that have previously existing associations.

" + "RequireEncryption":{ + "shape":"Boolean", + "documentation":"

When enabled, requires encryption for the Amazon Chime Voice Connector.

" } } }, - "AssociatePhoneNumbersWithVoiceConnectorResponse":{ + "CreateVoiceConnectorResponse":{ "type":"structure", "members":{ - "PhoneNumberErrors":{ - "shape":"PhoneNumberErrorList", - "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + "VoiceConnector":{ + "shape":"VoiceConnector", + "documentation":"

The Amazon Chime Voice Connector details.

" } } }, - "AssociateSigninDelegateGroupsWithAccountRequest":{ + "Credential":{ + "type":"structure", + "members":{ + "Username":{ + "shape":"SensitiveString", + "documentation":"

The RFC2617 compliant user name associated with the SIP credentials, in US-ASCII format.

" + }, + "Password":{ + "shape":"SensitiveString", + "documentation":"

The RFC2617 compliant password associated with the SIP credentials, in US-ASCII format.

" + } + }, + "documentation":"

The SIP credentials used to authenticate requests to your Amazon Chime Voice Connector.

" + }, + "CredentialList":{ + "type":"list", + "member":{"shape":"Credential"} + }, + "DNISEmergencyCallingConfiguration":{ "type":"structure", "required":[ - "AccountId", - "SigninDelegateGroups" + "EmergencyPhoneNumber", + "CallingCountry" ], + "members":{ + "EmergencyPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The DNIS phone number to route emergency calls to, in E.164 format.

" + }, + "TestPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The DNIS phone number to route test emergency calls to, in E.164 format.

" + }, + "CallingCountry":{ + "shape":"Alpha2CountryCode", + "documentation":"

The country from which emergency calls are allowed, in ISO 3166-1 alpha-2 format.

" + } + }, + "documentation":"

The Dialed Number Identification Service (DNIS) emergency calling configuration details associated with an Amazon Chime Voice Connector's emergency calling configuration.

" + }, + "DNISEmergencyCallingConfigurationList":{ + "type":"list", + "member":{"shape":"DNISEmergencyCallingConfiguration"} + }, + "DataRetentionInHours":{ + "type":"integer", + "min":0 + }, + "DeleteAccountRequest":{ + "type":"structure", + "required":["AccountId"], "members":{ "AccountId":{ "shape":"NonEmptyString", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" - }, - "SigninDelegateGroups":{ - "shape":"SigninDelegateGroupList", - "documentation":"

The sign-in delegate groups.

" } } }, - "AssociateSigninDelegateGroupsWithAccountResponse":{ + "DeleteAccountResponse":{ "type":"structure", "members":{ } }, - "Attendee":{ + "DeleteAppInstanceAdminRequest":{ "type":"structure", + "required":[ + "AppInstanceAdminArn", + "AppInstanceArn" + ], "members":{ - "ExternalUserId":{ - "shape":"ExternalUserIdType", - "documentation":"

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

" - }, - "AttendeeId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK attendee ID.

" + "AppInstanceAdminArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance's administrator.

", + "location":"uri", + "locationName":"appInstanceAdminArn" }, - "JoinToken":{ - "shape":"JoinTokenString", - "documentation":"

The join token used by the Amazon Chime SDK attendee.

" + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" } - }, - "documentation":"

An Amazon Chime SDK meeting attendee. Includes a unique AttendeeId and JoinToken. The JoinToken allows a client to authenticate and join as the specified attendee. The JoinToken expires when the meeting ends or when DeleteAttendee is called. After that, the attendee is unable to join the meeting.

We recommend securely transferring each JoinToken from your server application to the client so that no other client has access to the token except for the one authorized to represent the attendee.

" - }, - "AttendeeList":{ - "type":"list", - "member":{"shape":"Attendee"} - }, - "AttendeeTagKeyList":{ - "type":"list", - "member":{"shape":"TagKey"}, - "max":10, - "min":1 + } }, - "AttendeeTagList":{ - "type":"list", - "member":{"shape":"Tag"}, - "max":10, - "min":1 + "DeleteAppInstanceRequest":{ + "type":"structure", + "required":["AppInstanceArn"], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } }, - "BadRequestException":{ + "DeleteAppInstanceStreamingConfigurationsRequest":{ "type":"structure", + "required":["AppInstanceArn"], "members":{ - "Code":{"shape":"ErrorCode"}, - "Message":{"shape":"String"} - }, - "documentation":"

The input parameters don't match the service's restrictions.

", - "error":{"httpStatusCode":400}, - "exception":true + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the streaming configurations being deleted.

", + "location":"uri", + "locationName":"appInstanceArn" + } + } }, - "BatchCreateAttendeeErrorList":{ - "type":"list", - "member":{"shape":"CreateAttendeeError"} + "DeleteAppInstanceUserRequest":{ + "type":"structure", + "required":["AppInstanceUserArn"], + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the user request being deleted.

", + "location":"uri", + "locationName":"appInstanceUserArn" + } + } }, - "BatchCreateAttendeeRequest":{ + "DeleteAttendeeRequest":{ "type":"structure", "required":[ "MeetingId", - "Attendees" + "AttendeeId" ], "members":{ "MeetingId":{ @@ -2662,112 +5747,115 @@ "location":"uri", "locationName":"meetingId" }, - "Attendees":{ - "shape":"CreateAttendeeRequestItemList", - "documentation":"

The request containing the attendees to create.

" + "AttendeeId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK attendee ID.

", + "location":"uri", + "locationName":"attendeeId" } } }, - "BatchCreateAttendeeResponse":{ + "DeleteChannelBanRequest":{ "type":"structure", + "required":[ + "ChannelArn", + "MemberArn" + ], "members":{ - "Attendees":{ - "shape":"AttendeeList", - "documentation":"

The attendee information, including attendees IDs and join tokens.

" + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which the app instance user was banned.

", + "location":"uri", + "locationName":"channelArn" }, - "Errors":{ - "shape":"BatchCreateAttendeeErrorList", - "documentation":"

If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.

" + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user that you want to reinstate.

", + "location":"uri", + "locationName":"memberArn" } } }, - "BatchCreateRoomMembershipRequest":{ + "DeleteChannelMembershipRequest":{ "type":"structure", "required":[ - "AccountId", - "RoomId", - "MembershipItemList" + "ChannelArn", + "MemberArn" ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which you want to remove the user.

", "location":"uri", - "locationName":"accountId" + "locationName":"channelArn" }, - "RoomId":{ - "shape":"NonEmptyString", - "documentation":"

The room ID.

", + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member that you're removing from the channel.

", "location":"uri", - "locationName":"roomId" - }, - "MembershipItemList":{ - "shape":"MembershipItemList", - "documentation":"

The list of membership items.

" - } - } - }, - "BatchCreateRoomMembershipResponse":{ - "type":"structure", - "members":{ - "Errors":{ - "shape":"MemberErrorList", - "documentation":"

If the action fails for one or more of the member IDs in the request, a list of the member IDs is returned, along with error codes and error messages.

" - } - } - }, - "BatchDeletePhoneNumberRequest":{ - "type":"structure", - "required":["PhoneNumberIds"], - "members":{ - "PhoneNumberIds":{ - "shape":"NonEmptyStringList", - "documentation":"

List of phone number IDs.

" + "locationName":"memberArn" } } }, - "BatchDeletePhoneNumberResponse":{ + "DeleteChannelMessageRequest":{ "type":"structure", - "members":{ - "PhoneNumberErrors":{ - "shape":"PhoneNumberErrorList", - "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + "required":[ + "ChannelArn", + "MessageId" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message being deleted.

", + "location":"uri", + "locationName":"messageId" } } }, - "BatchSuspendUserRequest":{ + "DeleteChannelModeratorRequest":{ "type":"structure", "required":[ - "AccountId", - "UserIdList" + "ChannelArn", + "ChannelModeratorArn" ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", "location":"uri", - "locationName":"accountId" + "locationName":"channelArn" }, - "UserIdList":{ - "shape":"UserIdList", - "documentation":"

The request containing the user IDs to suspend.

" + "ChannelModeratorArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderator being deleted.

", + "location":"uri", + "locationName":"channelModeratorArn" } } }, - "BatchSuspendUserResponse":{ + "DeleteChannelRequest":{ "type":"structure", + "required":["ChannelArn"], "members":{ - "UserErrors":{ - "shape":"UserErrorList", - "documentation":"

If the BatchSuspendUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

" + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel being deleted.

", + "location":"uri", + "locationName":"channelArn" } } }, - "BatchUnsuspendUserRequest":{ + "DeleteEventsConfigurationRequest":{ "type":"structure", "required":[ "AccountId", - "UserIdList" + "BotId" ], "members":{ "AccountId":{ @@ -2776,45 +5864,65 @@ "location":"uri", "locationName":"accountId" }, - "UserIdList":{ - "shape":"UserIdList", - "documentation":"

The request containing the user IDs to unsuspend.

" + "BotId":{ + "shape":"NonEmptyString", + "documentation":"

The bot ID.

", + "location":"uri", + "locationName":"botId" } } }, - "BatchUnsuspendUserResponse":{ + "DeleteMeetingRequest":{ "type":"structure", + "required":["MeetingId"], "members":{ - "UserErrors":{ - "shape":"UserErrorList", - "documentation":"

If the BatchUnsuspendUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

" + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" } } }, - "BatchUpdatePhoneNumberRequest":{ + "DeletePhoneNumberRequest":{ "type":"structure", - "required":["UpdatePhoneNumberRequestItems"], + "required":["PhoneNumberId"], "members":{ - "UpdatePhoneNumberRequestItems":{ - "shape":"UpdatePhoneNumberRequestItemList", - "documentation":"

The request containing the phone number IDs and product types or calling names to update.

" + "PhoneNumberId":{ + "shape":"String", + "documentation":"

The phone number ID.

", + "location":"uri", + "locationName":"phoneNumberId" } } }, - "BatchUpdatePhoneNumberResponse":{ + "DeleteProxySessionRequest":{ "type":"structure", + "required":[ + "VoiceConnectorId", + "ProxySessionId" + ], "members":{ - "PhoneNumberErrors":{ - "shape":"PhoneNumberErrorList", - "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "ProxySessionId":{ + "shape":"NonEmptyString128", + "documentation":"

The proxy session ID.

", + "location":"uri", + "locationName":"proxySessionId" } } }, - "BatchUpdateUserRequest":{ + "DeleteRoomMembershipRequest":{ "type":"structure", "required":[ "AccountId", - "UpdateUserRequestItems" + "RoomId", + "MemberId" ], "members":{ "AccountId":{ @@ -2823,649 +5931,641 @@ "location":"uri", "locationName":"accountId" }, - "UpdateUserRequestItems":{ - "shape":"UpdateUserRequestItemList", - "documentation":"

The request containing the user IDs and details to update.

" + "RoomId":{ + "shape":"NonEmptyString", + "documentation":"

The room ID.

", + "location":"uri", + "locationName":"roomId" + }, + "MemberId":{ + "shape":"NonEmptyString", + "documentation":"

The member ID (user ID or bot ID).

", + "location":"uri", + "locationName":"memberId" } } }, - "BatchUpdateUserResponse":{ + "DeleteRoomRequest":{ "type":"structure", + "required":[ + "AccountId", + "RoomId" + ], "members":{ - "UserErrors":{ - "shape":"UserErrorList", - "documentation":"

If the BatchUpdateUser action fails for one or more of the user IDs in the request, a list of the user IDs is returned, along with error codes and error messages.

" + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "documentation":"

The chat room ID.

", + "location":"uri", + "locationName":"roomId" } } }, - "Boolean":{"type":"boolean"}, - "Bot":{ + "DeleteSipMediaApplicationRequest":{ "type":"structure", + "required":["SipMediaApplicationId"], "members":{ - "BotId":{ - "shape":"String", - "documentation":"

The bot ID.

" - }, - "UserId":{ - "shape":"String", - "documentation":"

The unique ID for the bot user.

" - }, - "DisplayName":{ - "shape":"SensitiveString", - "documentation":"

The bot display name.

" - }, - "BotType":{ - "shape":"BotType", - "documentation":"

The bot type.

" - }, - "Disabled":{ - "shape":"NullableBoolean", - "documentation":"

When true, the bot is stopped from running in your account.

" - }, - "CreatedTimestamp":{ - "shape":"Iso8601Timestamp", - "documentation":"

The bot creation timestamp, in ISO 8601 format.

" - }, - "UpdatedTimestamp":{ - "shape":"Iso8601Timestamp", - "documentation":"

The updated bot timestamp, in ISO 8601 format.

" - }, - "BotEmail":{ - "shape":"SensitiveString", - "documentation":"

The bot email address.

" - }, - "SecurityToken":{ - "shape":"SensitiveString", - "documentation":"

The security token used to authenticate Amazon Chime with the outgoing event endpoint.

" + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP media application ID.

", + "location":"uri", + "locationName":"sipMediaApplicationId" } - }, - "documentation":"

A resource that allows Enterprise account administrators to configure an interface to receive events from Amazon Chime.

" - }, - "BotList":{ - "type":"list", - "member":{"shape":"Bot"} - }, - "BotType":{ - "type":"string", - "enum":["ChatBot"] + } }, - "BusinessCallingSettings":{ + "DeleteSipRuleRequest":{ "type":"structure", + "required":["SipRuleId"], "members":{ - "CdrBucket":{ - "shape":"String", - "documentation":"

The Amazon S3 bucket designated for call detail record storage.

", - "box":true + "SipRuleId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP rule ID.

", + "location":"uri", + "locationName":"sipRuleId" } - }, - "documentation":"

The Amazon Chime Business Calling settings for the administrator's AWS account. Includes any Amazon S3 buckets designated for storing call detail records.

" - }, - "CallingName":{ - "type":"string", - "pattern":"^$|^[a-zA-Z0-9 ]{2,15}$", - "sensitive":true - }, - "CallingNameStatus":{ - "type":"string", - "enum":[ - "Unassigned", - "UpdateInProgress", - "UpdateSucceeded", - "UpdateFailed" - ] - }, - "CallingRegion":{"type":"string"}, - "CallingRegionList":{ - "type":"list", - "member":{"shape":"CallingRegion"} - }, - "Capability":{ - "type":"string", - "enum":[ - "Voice", - "SMS" - ] - }, - "CapabilityList":{ - "type":"list", - "member":{"shape":"Capability"} - }, - "ClientRequestToken":{ - "type":"string", - "max":64, - "min":2, - "pattern":"[-_a-zA-Z0-9]*", - "sensitive":true + } }, - "ConflictException":{ + "DeleteVoiceConnectorEmergencyCallingConfigurationRequest":{ "type":"structure", + "required":["VoiceConnectorId"], "members":{ - "Code":{"shape":"ErrorCode"}, - "Message":{"shape":"String"} - }, - "documentation":"

The request could not be processed because of conflict in the current state of the resource.

", - "error":{"httpStatusCode":409}, - "exception":true + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + } + } }, - "ConversationRetentionSettings":{ + "DeleteVoiceConnectorGroupRequest":{ "type":"structure", + "required":["VoiceConnectorGroupId"], "members":{ - "RetentionDays":{ - "shape":"RetentionDays", - "documentation":"

The number of days for which to retain chat conversation messages.

" - } - }, - "documentation":"

The retention settings that determine how long to retain chat conversation messages for an Amazon Chime Enterprise account.

" - }, - "Country":{ - "type":"string", - "pattern":"^$|^[A-Z]{2,2}$" - }, - "CountryList":{ - "type":"list", - "member":{"shape":"Country"}, - "max":100, - "min":1 - }, - "CpsLimit":{ - "type":"integer", - "min":1 + "VoiceConnectorGroupId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector group ID.

", + "location":"uri", + "locationName":"voiceConnectorGroupId" + } + } }, - "CreateAccountRequest":{ + "DeleteVoiceConnectorOriginationRequest":{ "type":"structure", - "required":["Name"], + "required":["VoiceConnectorId"], "members":{ - "Name":{ - "shape":"AccountName", - "documentation":"

The name of the Amazon Chime account.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" } } }, - "CreateAccountResponse":{ + "DeleteVoiceConnectorProxyRequest":{ "type":"structure", + "required":["VoiceConnectorId"], "members":{ - "Account":{ - "shape":"Account", - "documentation":"

The Amazon Chime account details.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" } } }, - "CreateAttendeeError":{ + "DeleteVoiceConnectorRequest":{ "type":"structure", + "required":["VoiceConnectorId"], "members":{ - "ExternalUserId":{ - "shape":"ExternalUserIdType", - "documentation":"

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

" - }, - "ErrorCode":{ - "shape":"String", - "documentation":"

The error code.

" - }, - "ErrorMessage":{ - "shape":"String", - "documentation":"

The error message.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" } - }, - "documentation":"

The list of errors returned when errors are encountered during the BatchCreateAttendee and CreateAttendee actions. This includes external user IDs, error codes, and error messages.

" + } }, - "CreateAttendeeRequest":{ + "DeleteVoiceConnectorStreamingConfigurationRequest":{ "type":"structure", - "required":[ - "MeetingId", - "ExternalUserId" - ], + "required":["VoiceConnectorId"], "members":{ - "MeetingId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK meeting ID.

", + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", "location":"uri", - "locationName":"meetingId" - }, - "ExternalUserId":{ - "shape":"ExternalUserIdType", - "documentation":"

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

" - }, - "Tags":{ - "shape":"AttendeeTagList", - "documentation":"

The tag key-value pairs.

" + "locationName":"voiceConnectorId" } } }, - "CreateAttendeeRequestItem":{ + "DeleteVoiceConnectorTerminationCredentialsRequest":{ "type":"structure", - "required":["ExternalUserId"], + "required":[ + "Usernames", + "VoiceConnectorId" + ], "members":{ - "ExternalUserId":{ - "shape":"ExternalUserIdType", - "documentation":"

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" }, - "Tags":{ - "shape":"AttendeeTagList", - "documentation":"

The tag key-value pairs.

" + "Usernames":{ + "shape":"SensitiveStringList", + "documentation":"

The RFC2617 compliant username associated with the SIP credentials, in US-ASCII format.

" } - }, - "documentation":"

The Amazon Chime SDK attendee fields to create, used with the BatchCreateAttendee action.

" - }, - "CreateAttendeeRequestItemList":{ - "type":"list", - "member":{"shape":"CreateAttendeeRequestItem"} + } }, - "CreateAttendeeResponse":{ + "DeleteVoiceConnectorTerminationRequest":{ "type":"structure", + "required":["VoiceConnectorId"], "members":{ - "Attendee":{ - "shape":"Attendee", - "documentation":"

The attendee information, including attendee ID and join token.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" } } }, - "CreateBotRequest":{ + "DescribeAppInstanceAdminRequest":{ "type":"structure", "required":[ - "DisplayName", - "AccountId" + "AppInstanceAdminArn", + "AppInstanceArn" ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", + "AppInstanceAdminArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance administrator.

", "location":"uri", - "locationName":"accountId" + "locationName":"appInstanceAdminArn" }, - "DisplayName":{ - "shape":"SensitiveString", - "documentation":"

The bot display name.

" - }, - "Domain":{ - "shape":"NonEmptyString", - "documentation":"

The domain of the Amazon Chime Enterprise account.

" + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" } } }, - "CreateBotResponse":{ + "DescribeAppInstanceAdminResponse":{ "type":"structure", "members":{ - "Bot":{ - "shape":"Bot", - "documentation":"

The bot details.

" + "AppInstanceAdmin":{ + "shape":"AppInstanceAdmin", + "documentation":"

The ARN and name of the app instance user, the ARN of the app instance, and the created and last-updated timestamps. All timestamps use epoch milliseconds.

" } } }, - "CreateMeetingRequest":{ + "DescribeAppInstanceRequest":{ "type":"structure", - "required":["ClientRequestToken"], + "required":["AppInstanceArn"], "members":{ - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The unique identifier for the client request. Use a different token for different meetings.

", - "idempotencyToken":true - }, - "ExternalMeetingId":{ - "shape":"ExternalMeetingIdType", - "documentation":"

The external meeting ID.

" - }, - "MeetingHostId":{ - "shape":"ExternalUserIdType", - "documentation":"

Reserved.

" - }, - "MediaRegion":{ - "shape":"String", - "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: af-south-1, ap-northeast-1, ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" - }, - "Tags":{ - "shape":"MeetingTagList", - "documentation":"

The tag key-value pairs.

" - }, - "NotificationsConfiguration":{ - "shape":"MeetingNotificationConfiguration", - "documentation":"

The configuration for resource targets to receive notifications when meeting and attendee events occur.

" + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" } } }, - "CreateMeetingResponse":{ + "DescribeAppInstanceResponse":{ "type":"structure", "members":{ - "Meeting":{ - "shape":"Meeting", - "documentation":"

The meeting information, including the meeting ID and MediaPlacement.

" + "AppInstance":{ + "shape":"AppInstance", + "documentation":"

The ARN, metadata, created and last-updated timestamps, and the name of the app instance. All timestamps use epoch milliseconds.

" } } }, - "CreateMeetingWithAttendeesRequest":{ + "DescribeAppInstanceUserRequest":{ "type":"structure", - "required":["ClientRequestToken"], + "required":["AppInstanceUserArn"], "members":{ - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The unique identifier for the client request. Use a different token for different meetings.

", - "idempotencyToken":true - }, - "ExternalMeetingId":{ - "shape":"ExternalMeetingIdType", - "documentation":"

The external meeting ID.

" - }, - "MeetingHostId":{ - "shape":"ExternalUserIdType", - "documentation":"

Reserved.

" - }, - "MediaRegion":{ - "shape":"String", - "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: af-south-1, ap-northeast-1, ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-south-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" - }, - "Tags":{ - "shape":"MeetingTagList", - "documentation":"

The tag key-value pairs.

" - }, - "NotificationsConfiguration":{"shape":"MeetingNotificationConfiguration"}, - "Attendees":{ - "shape":"CreateMeetingWithAttendeesRequestItemList", - "documentation":"

The request containing the attendees to create.

" + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user.

", + "location":"uri", + "locationName":"appInstanceUserArn" } } }, - "CreateMeetingWithAttendeesRequestItemList":{ - "type":"list", - "member":{"shape":"CreateAttendeeRequestItem"}, - "max":10, - "min":1 - }, - "CreateMeetingWithAttendeesResponse":{ + "DescribeAppInstanceUserResponse":{ "type":"structure", "members":{ - "Meeting":{"shape":"Meeting"}, - "Attendees":{ - "shape":"AttendeeList", - "documentation":"

The attendee information, including attendees IDs and join tokens.

" - }, - "Errors":{ - "shape":"BatchCreateAttendeeErrorList", - "documentation":"

If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.

" + "AppInstanceUser":{ + "shape":"AppInstanceUser", + "documentation":"

The details of the app instance user.

" } } }, - "CreatePhoneNumberOrderRequest":{ + "DescribeChannelBanRequest":{ "type":"structure", "required":[ - "ProductType", - "E164PhoneNumbers" + "ChannelArn", + "MemberArn" ], "members":{ - "ProductType":{ - "shape":"PhoneNumberProductType", - "documentation":"

The phone number product type.

" + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel from which the user is banned.

", + "location":"uri", + "locationName":"channelArn" }, - "E164PhoneNumbers":{ - "shape":"E164PhoneNumberList", - "documentation":"

List of phone numbers, in E.164 format.

" + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member being banned.

", + "location":"uri", + "locationName":"memberArn" } } }, - "CreatePhoneNumberOrderResponse":{ + "DescribeChannelBanResponse":{ "type":"structure", "members":{ - "PhoneNumberOrder":{ - "shape":"PhoneNumberOrder", - "documentation":"

The phone number order details.

" + "ChannelBan":{ + "shape":"ChannelBan", + "documentation":"

The details of the ban.

" } } }, - "CreateProxySessionRequest":{ + "DescribeChannelMembershipForAppInstanceUserRequest":{ "type":"structure", "required":[ - "ParticipantPhoneNumbers", - "Capabilities", - "VoiceConnectorId" + "ChannelArn", + "AppInstanceUserArn" ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString128", - "documentation":"

The Amazon Chime voice connector ID.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel to which the user belongs.

", "location":"uri", - "locationName":"voiceConnectorId" - }, - "ParticipantPhoneNumbers":{ - "shape":"ParticipantPhoneNumberList", - "documentation":"

The participant phone numbers.

" - }, - "Name":{ - "shape":"ProxySessionNameString", - "documentation":"

The name of the proxy session.

" - }, - "ExpiryMinutes":{ - "shape":"PositiveInteger", - "documentation":"

The number of minutes allowed for the proxy session.

" - }, - "Capabilities":{ - "shape":"CapabilityList", - "documentation":"

The proxy session capabilities.

" - }, - "NumberSelectionBehavior":{ - "shape":"NumberSelectionBehavior", - "documentation":"

The preference for proxy phone number reuse, or stickiness, between the same participants across sessions.

" + "locationName":"channelArn" }, - "GeoMatchLevel":{ - "shape":"GeoMatchLevel", - "documentation":"

The preference for matching the country or area code of the proxy phone number with that of the first participant.

" + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the user in a channel.

", + "location":"querystring", + "locationName":"app-instance-user-arn" + } + } + }, + "DescribeChannelMembershipForAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "ChannelMembership":{ + "shape":"ChannelMembershipForAppInstanceUserSummary", + "documentation":"

The channel to which a user belongs.

" + } + } + }, + "DescribeChannelMembershipRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MemberArn" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" }, - "GeoMatchParams":{ - "shape":"GeoMatchParams", - "documentation":"

The country and area code for the proxy phone number.

" + "MemberArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the member.

", + "location":"uri", + "locationName":"memberArn" } } }, - "CreateProxySessionResponse":{ + "DescribeChannelMembershipResponse":{ "type":"structure", "members":{ - "ProxySession":{ - "shape":"ProxySession", - "documentation":"

The proxy session details.

" + "ChannelMembership":{ + "shape":"ChannelMembership", + "documentation":"

The details of the membership.

" } } }, - "CreateRoomMembershipRequest":{ + "DescribeChannelModeratedByAppInstanceUserRequest":{ "type":"structure", "required":[ - "AccountId", - "RoomId", - "MemberId" + "ChannelArn", + "AppInstanceUserArn" ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the moderated channel.

", "location":"uri", - "locationName":"accountId" + "locationName":"channelArn" }, - "RoomId":{ - "shape":"NonEmptyString", - "documentation":"

The room ID.

", + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user in the moderated channel.

", + "location":"querystring", + "locationName":"app-instance-user-arn" + } + } + }, + "DescribeChannelModeratedByAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "Channel":{ + "shape":"ChannelModeratedByAppInstanceUserSummary", + "documentation":"

The moderated channel.

" + } + } + }, + "DescribeChannelModeratorRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "ChannelModeratorArn" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", "location":"uri", - "locationName":"roomId" - }, - "MemberId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime member ID (user ID or bot ID).

" + "locationName":"channelArn" }, - "Role":{ - "shape":"RoomMembershipRole", - "documentation":"

The role of the member.

" + "ChannelModeratorArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel moderator.

", + "location":"uri", + "locationName":"channelModeratorArn" } } }, - "CreateRoomMembershipResponse":{ + "DescribeChannelModeratorResponse":{ "type":"structure", "members":{ - "RoomMembership":{ - "shape":"RoomMembership", - "documentation":"

The room membership details.

" + "ChannelModerator":{ + "shape":"ChannelModerator", + "documentation":"

The details of the channel moderator.

" } } }, - "CreateRoomRequest":{ + "DescribeChannelRequest":{ + "type":"structure", + "required":["ChannelArn"], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + } + } + }, + "DescribeChannelResponse":{ + "type":"structure", + "members":{ + "Channel":{ + "shape":"Channel", + "documentation":"

The channel details.

" + } + } + }, + "DisassociatePhoneNumberFromUserRequest":{ "type":"structure", "required":[ "AccountId", - "Name" + "UserId" ], "members":{ "AccountId":{ - "shape":"NonEmptyString", + "shape":"String", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" }, - "Name":{ - "shape":"SensitiveString", - "documentation":"

The room name.

" - }, - "ClientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The idempotency token for the request.

", - "idempotencyToken":true + "UserId":{ + "shape":"String", + "documentation":"

The user ID.

", + "location":"uri", + "locationName":"userId" } } }, - "CreateRoomResponse":{ + "DisassociatePhoneNumberFromUserResponse":{ "type":"structure", "members":{ - "Room":{ - "shape":"Room", - "documentation":"

The room details.

" - } } }, - "CreateUserRequest":{ + "DisassociatePhoneNumbersFromVoiceConnectorGroupRequest":{ "type":"structure", - "required":["AccountId"], + "required":[ + "VoiceConnectorGroupId", + "E164PhoneNumbers" + ], "members":{ - "AccountId":{ + "VoiceConnectorGroupId":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", + "documentation":"

The Amazon Chime Voice Connector group ID.

", "location":"uri", - "locationName":"accountId" - }, - "Username":{ - "shape":"String", - "documentation":"

The user name.

" - }, - "Email":{ - "shape":"EmailAddress", - "documentation":"

The user's email address.

" + "locationName":"voiceConnectorGroupId" }, - "UserType":{ - "shape":"UserType", - "documentation":"

The user type.

" + "E164PhoneNumbers":{ + "shape":"E164PhoneNumberList", + "documentation":"

List of phone numbers, in E.164 format.

" } } }, - "CreateUserResponse":{ + "DisassociatePhoneNumbersFromVoiceConnectorGroupResponse":{ "type":"structure", "members":{ - "User":{"shape":"User"} + "PhoneNumberErrors":{ + "shape":"PhoneNumberErrorList", + "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + } } }, - "CreateVoiceConnectorGroupRequest":{ + "DisassociatePhoneNumbersFromVoiceConnectorRequest":{ "type":"structure", - "required":["Name"], + "required":[ + "VoiceConnectorId", + "E164PhoneNumbers" + ], "members":{ - "Name":{ - "shape":"VoiceConnectorGroupName", - "documentation":"

The name of the Amazon Chime Voice Connector group.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" }, - "VoiceConnectorItems":{ - "shape":"VoiceConnectorItemList", - "documentation":"

The Amazon Chime Voice Connectors to route inbound calls to.

" + "E164PhoneNumbers":{ + "shape":"E164PhoneNumberList", + "documentation":"

List of phone numbers, in E.164 format.

" } } }, - "CreateVoiceConnectorGroupResponse":{ + "DisassociatePhoneNumbersFromVoiceConnectorResponse":{ "type":"structure", "members":{ - "VoiceConnectorGroup":{ - "shape":"VoiceConnectorGroup", - "documentation":"

The Amazon Chime Voice Connector group details.

" + "PhoneNumberErrors":{ + "shape":"PhoneNumberErrorList", + "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" } } }, - "CreateVoiceConnectorRequest":{ + "DisassociateSigninDelegateGroupsFromAccountRequest":{ "type":"structure", "required":[ - "Name", - "RequireEncryption" + "AccountId", + "GroupNames" ], "members":{ - "Name":{ - "shape":"VoiceConnectorName", - "documentation":"

The name of the Amazon Chime Voice Connector.

" - }, - "AwsRegion":{ - "shape":"VoiceConnectorAwsRegion", - "documentation":"

The AWS Region in which the Amazon Chime Voice Connector is created. Default value: us-east-1.

" + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" }, - "RequireEncryption":{ - "shape":"Boolean", - "documentation":"

When enabled, requires encryption for the Amazon Chime Voice Connector.

" + "GroupNames":{ + "shape":"NonEmptyStringList", + "documentation":"

The sign-in delegate group names.

" } } }, - "CreateVoiceConnectorResponse":{ + "DisassociateSigninDelegateGroupsFromAccountResponse":{ "type":"structure", "members":{ - "VoiceConnector":{ - "shape":"VoiceConnector", - "documentation":"

The Amazon Chime Voice Connector details.

" - } } }, - "Credential":{ + "E164PhoneNumber":{ + "type":"string", + "pattern":"^\\+?[1-9]\\d{1,14}$", + "sensitive":true + }, + "E164PhoneNumberList":{ + "type":"list", + "member":{"shape":"E164PhoneNumber"} + }, + "EmailAddress":{ + "type":"string", + "pattern":".+@.+\\..+", + "sensitive":true + }, + "EmailStatus":{ + "type":"string", + "enum":[ + "NotSent", + "Sent", + "Failed" + ] + }, + "EmergencyCallingConfiguration":{ "type":"structure", "members":{ - "Username":{ + "DNIS":{ + "shape":"DNISEmergencyCallingConfigurationList", + "documentation":"

The Dialed Number Identification Service (DNIS) emergency calling configuration details.

" + } + }, + "documentation":"

The emergency calling configuration details associated with an Amazon Chime Voice Connector.

" + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "BadRequest", + "Conflict", + "Forbidden", + "NotFound", + "PreconditionFailed", + "ResourceLimitExceeded", + "ServiceFailure", + "AccessDenied", + "ServiceUnavailable", + "Throttled", + "Throttling", + "Unauthorized", + "Unprocessable", + "VoiceConnectorGroupAssociationsExist", + "PhoneNumberAssociationsExist" + ] + }, + "EventsConfiguration":{ + "type":"structure", + "members":{ + "BotId":{ + "shape":"String", + "documentation":"

The bot ID.

" + }, + "OutboundEventsHTTPSEndpoint":{ "shape":"SensitiveString", - "documentation":"

The RFC2617 compliant user name associated with the SIP credentials, in US-ASCII format.

" + "documentation":"

HTTPS endpoint that allows a bot to receive outgoing events.

" }, - "Password":{ + "LambdaFunctionArn":{ "shape":"SensitiveString", - "documentation":"

The RFC2617 compliant password associated with the SIP credentials, in US-ASCII format.

" + "documentation":"

Lambda function ARN that allows a bot to receive outgoing events.

" } }, - "documentation":"

The SIP credentials used to authenticate requests to your Amazon Chime Voice Connector.

" + "documentation":"

The configuration that allows a bot to receive outgoing events. Can be either an HTTPS endpoint or a Lambda function ARN.

" + }, + "ExternalMeetingIdType":{ + "type":"string", + "max":64, + "min":2, + "sensitive":true + }, + "ExternalUserIdType":{ + "type":"string", + "max":64, + "min":2, + "sensitive":true + }, + "ForbiddenException":{ + "type":"structure", + "members":{ + "Code":{"shape":"ErrorCode"}, + "Message":{"shape":"String"} + }, + "documentation":"

The client is permanently forbidden from making the request.

", + "error":{"httpStatusCode":403}, + "exception":true }, - "CredentialList":{ - "type":"list", - "member":{"shape":"Credential"} + "FunctionArn":{ + "type":"string", + "max":10000, + "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?", + "sensitive":true }, - "DNISEmergencyCallingConfiguration":{ + "GeoMatchLevel":{ + "type":"string", + "enum":[ + "Country", + "AreaCode" + ] + }, + "GeoMatchParams":{ "type":"structure", "required":[ - "EmergencyPhoneNumber", - "CallingCountry" + "Country", + "AreaCode" ], "members":{ - "EmergencyPhoneNumber":{ - "shape":"E164PhoneNumber", - "documentation":"

The DNIS phone number to route emergency calls to, in E.164 format.

" - }, - "TestPhoneNumber":{ - "shape":"E164PhoneNumber", - "documentation":"

The DNIS phone number to route test emergency calls to, in E.164 format.

" + "Country":{ + "shape":"Country", + "documentation":"

The country.

" }, - "CallingCountry":{ - "shape":"Alpha2CountryCode", - "documentation":"

The country from which emergency calls are allowed, in ISO 3166-1 alpha-2 format.

" + "AreaCode":{ + "shape":"AreaCode", + "documentation":"

The area code.

" } }, - "documentation":"

The Dialed Number Identification Service (DNIS) emergency calling configuration details associated with an Amazon Chime Voice Connector's emergency calling configuration.

" - }, - "DNISEmergencyCallingConfigurationList":{ - "type":"list", - "member":{"shape":"DNISEmergencyCallingConfiguration"} - }, - "DataRetentionInHours":{ - "type":"integer", - "min":0 + "documentation":"

The country and area code for a proxy phone number in a proxy phone session.

" }, - "DeleteAccountRequest":{ + "GetAccountRequest":{ "type":"structure", "required":["AccountId"], "members":{ @@ -3477,474 +6577,333 @@ } } }, - "DeleteAccountResponse":{ - "type":"structure", - "members":{ - } - }, - "DeleteAttendeeRequest":{ + "GetAccountResponse":{ "type":"structure", - "required":[ - "MeetingId", - "AttendeeId" - ], "members":{ - "MeetingId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK meeting ID.

", - "location":"uri", - "locationName":"meetingId" - }, - "AttendeeId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK attendee ID.

", - "location":"uri", - "locationName":"attendeeId" - } + "Account":{"shape":"Account"} } }, - "DeleteEventsConfigurationRequest":{ + "GetAccountSettingsRequest":{ "type":"structure", - "required":[ - "AccountId", - "BotId" - ], + "required":["AccountId"], "members":{ "AccountId":{ "shape":"NonEmptyString", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" - }, - "BotId":{ - "shape":"NonEmptyString", - "documentation":"

The bot ID.

", - "location":"uri", - "locationName":"botId" } } }, - "DeleteMeetingRequest":{ + "GetAccountSettingsResponse":{ "type":"structure", - "required":["MeetingId"], "members":{ - "MeetingId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK meeting ID.

", - "location":"uri", - "locationName":"meetingId" + "AccountSettings":{ + "shape":"AccountSettings", + "documentation":"

The Amazon Chime account settings.

" } } }, - "DeletePhoneNumberRequest":{ + "GetAppInstanceRetentionSettingsRequest":{ "type":"structure", - "required":["PhoneNumberId"], + "required":["AppInstanceArn"], "members":{ - "PhoneNumberId":{ - "shape":"String", - "documentation":"

The phone number ID.

", + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", "location":"uri", - "locationName":"phoneNumberId" + "locationName":"appInstanceArn" } } }, - "DeleteProxySessionRequest":{ + "GetAppInstanceRetentionSettingsResponse":{ "type":"structure", - "required":[ - "VoiceConnectorId", - "ProxySessionId" - ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString128", - "documentation":"

The Amazon Chime voice connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" + "AppInstanceRetentionSettings":{ + "shape":"AppInstanceRetentionSettings", + "documentation":"

The retention settings for the app instance.

" }, - "ProxySessionId":{ - "shape":"NonEmptyString128", - "documentation":"

The proxy session ID.

", - "location":"uri", - "locationName":"proxySessionId" + "InitiateDeletionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp representing the time at which the specified items are retained, in Epoch Seconds.

" } } }, - "DeleteRoomMembershipRequest":{ + "GetAppInstanceStreamingConfigurationsRequest":{ "type":"structure", - "required":[ - "AccountId", - "RoomId", - "MemberId" - ], + "required":["AppInstanceArn"], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" - }, - "RoomId":{ - "shape":"NonEmptyString", - "documentation":"

The room ID.

", - "location":"uri", - "locationName":"roomId" - }, - "MemberId":{ - "shape":"NonEmptyString", - "documentation":"

The member ID (user ID or bot ID).

", + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", "location":"uri", - "locationName":"memberId" + "locationName":"appInstanceArn" } } }, - "DeleteRoomRequest":{ + "GetAppInstanceStreamingConfigurationsResponse":{ "type":"structure", - "required":[ - "AccountId", - "RoomId" - ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" - }, - "RoomId":{ - "shape":"NonEmptyString", - "documentation":"

The chat room ID.

", - "location":"uri", - "locationName":"roomId" + "AppInstanceStreamingConfigurations":{ + "shape":"AppInstanceStreamingConfigurationList", + "documentation":"

The streaming settings.

" } } }, - "DeleteVoiceConnectorEmergencyCallingConfigurationRequest":{ + "GetAttendeeRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":[ + "MeetingId", + "AttendeeId" + ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", "location":"uri", - "locationName":"voiceConnectorId" - } - } - }, - "DeleteVoiceConnectorGroupRequest":{ - "type":"structure", - "required":["VoiceConnectorGroupId"], - "members":{ - "VoiceConnectorGroupId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector group ID.

", + "locationName":"meetingId" + }, + "AttendeeId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK attendee ID.

", "location":"uri", - "locationName":"voiceConnectorGroupId" + "locationName":"attendeeId" } } }, - "DeleteVoiceConnectorOriginationRequest":{ + "GetAttendeeResponse":{ "type":"structure", - "required":["VoiceConnectorId"], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" + "Attendee":{ + "shape":"Attendee", + "documentation":"

The Amazon Chime SDK attendee information.

" } } }, - "DeleteVoiceConnectorProxyRequest":{ + "GetBotRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":[ + "AccountId", + "BotId" + ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString128", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", "location":"uri", - "locationName":"voiceConnectorId" - } - } - }, - "DeleteVoiceConnectorRequest":{ - "type":"structure", - "required":["VoiceConnectorId"], - "members":{ - "VoiceConnectorId":{ + "locationName":"accountId" + }, + "BotId":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "documentation":"

The bot ID.

", "location":"uri", - "locationName":"voiceConnectorId" + "locationName":"botId" } } }, - "DeleteVoiceConnectorStreamingConfigurationRequest":{ + "GetBotResponse":{ "type":"structure", - "required":["VoiceConnectorId"], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" + "Bot":{ + "shape":"Bot", + "documentation":"

The chat bot details.

" } } }, - "DeleteVoiceConnectorTerminationCredentialsRequest":{ + "GetChannelMessageRequest":{ "type":"structure", "required":[ - "Usernames", - "VoiceConnectorId" + "ChannelArn", + "MessageId" ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", "location":"uri", - "locationName":"voiceConnectorId" - }, - "Usernames":{ - "shape":"SensitiveStringList", - "documentation":"

The RFC2617 compliant username associated with the SIP credentials, in US-ASCII format.

" + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message.

", + "location":"uri", + "locationName":"messageId" } } }, - "DeleteVoiceConnectorTerminationRequest":{ + "GetChannelMessageResponse":{ "type":"structure", - "required":["VoiceConnectorId"], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" + "ChannelMessage":{ + "shape":"ChannelMessage", + "documentation":"

The details of, and content in, the message.

" } } }, - "DisassociatePhoneNumberFromUserRequest":{ + "GetEventsConfigurationRequest":{ "type":"structure", "required":[ "AccountId", - "UserId" + "BotId" ], "members":{ "AccountId":{ - "shape":"String", + "shape":"NonEmptyString", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" }, - "UserId":{ - "shape":"String", - "documentation":"

The user ID.

", + "BotId":{ + "shape":"NonEmptyString", + "documentation":"

The bot ID.

", "location":"uri", - "locationName":"userId" + "locationName":"botId" } } }, - "DisassociatePhoneNumberFromUserResponse":{ + "GetEventsConfigurationResponse":{ "type":"structure", "members":{ + "EventsConfiguration":{ + "shape":"EventsConfiguration", + "documentation":"

The events configuration details.

" + } } }, - "DisassociatePhoneNumbersFromVoiceConnectorGroupRequest":{ + "GetGlobalSettingsResponse":{ "type":"structure", - "required":[ - "VoiceConnectorGroupId", - "E164PhoneNumbers" - ], "members":{ - "VoiceConnectorGroupId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector group ID.

", - "location":"uri", - "locationName":"voiceConnectorGroupId" + "BusinessCalling":{ + "shape":"BusinessCallingSettings", + "documentation":"

The Amazon Chime Business Calling settings.

" }, - "E164PhoneNumbers":{ - "shape":"E164PhoneNumberList", - "documentation":"

List of phone numbers, in E.164 format.

" + "VoiceConnector":{ + "shape":"VoiceConnectorSettings", + "documentation":"

The Amazon Chime Voice Connector settings.

" } } }, - "DisassociatePhoneNumbersFromVoiceConnectorGroupResponse":{ + "GetMeetingRequest":{ "type":"structure", + "required":["MeetingId"], "members":{ - "PhoneNumberErrors":{ - "shape":"PhoneNumberErrorList", - "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" } } }, - "DisassociatePhoneNumbersFromVoiceConnectorRequest":{ + "GetMeetingResponse":{ "type":"structure", - "required":[ - "VoiceConnectorId", - "E164PhoneNumbers" - ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" - }, - "E164PhoneNumbers":{ - "shape":"E164PhoneNumberList", - "documentation":"

List of phone numbers, in E.164 format.

" + "Meeting":{ + "shape":"Meeting", + "documentation":"

The Amazon Chime SDK meeting information.

" } } }, - "DisassociatePhoneNumbersFromVoiceConnectorResponse":{ + "GetMessagingSessionEndpointRequest":{ "type":"structure", "members":{ - "PhoneNumberErrors":{ - "shape":"PhoneNumberErrorList", - "documentation":"

If the action fails for one or more of the phone numbers in the request, a list of the phone numbers is returned, along with error codes and error messages.

" - } } }, - "DisassociateSigninDelegateGroupsFromAccountRequest":{ + "GetMessagingSessionEndpointResponse":{ "type":"structure", - "required":[ - "AccountId", - "GroupNames" - ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" - }, - "GroupNames":{ - "shape":"NonEmptyStringList", - "documentation":"

The sign-in delegate group names.

" + "Endpoint":{ + "shape":"MessagingSessionEndpoint", + "documentation":"

The endpoint returned in the response.

" } } }, - "DisassociateSigninDelegateGroupsFromAccountResponse":{ + "GetPhoneNumberOrderRequest":{ "type":"structure", + "required":["PhoneNumberOrderId"], "members":{ + "PhoneNumberOrderId":{ + "shape":"GuidString", + "documentation":"

The ID for the phone number order.

", + "location":"uri", + "locationName":"phoneNumberOrderId" + } } }, - "E164PhoneNumber":{ - "type":"string", - "pattern":"^\\+?[1-9]\\d{1,14}$", - "sensitive":true - }, - "E164PhoneNumberList":{ - "type":"list", - "member":{"shape":"E164PhoneNumber"} - }, - "EmailAddress":{ - "type":"string", - "pattern":".+@.+\\..+", - "sensitive":true - }, - "EmailStatus":{ - "type":"string", - "enum":[ - "NotSent", - "Sent", - "Failed" - ] - }, - "EmergencyCallingConfiguration":{ + "GetPhoneNumberOrderResponse":{ "type":"structure", "members":{ - "DNIS":{ - "shape":"DNISEmergencyCallingConfigurationList", - "documentation":"

The Dialed Number Identification Service (DNIS) emergency calling configuration details.

" + "PhoneNumberOrder":{ + "shape":"PhoneNumberOrder", + "documentation":"

The phone number order details.

" } - }, - "documentation":"

The emergency calling configuration details associated with an Amazon Chime Voice Connector.

" - }, - "ErrorCode":{ - "type":"string", - "enum":[ - "BadRequest", - "Conflict", - "Forbidden", - "NotFound", - "PreconditionFailed", - "ResourceLimitExceeded", - "ServiceFailure", - "AccessDenied", - "ServiceUnavailable", - "Throttled", - "Throttling", - "Unauthorized", - "Unprocessable", - "VoiceConnectorGroupAssociationsExist", - "PhoneNumberAssociationsExist" - ] + } }, - "EventsConfiguration":{ + "GetPhoneNumberRequest":{ "type":"structure", + "required":["PhoneNumberId"], "members":{ - "BotId":{ + "PhoneNumberId":{ "shape":"String", - "documentation":"

The bot ID.

" - }, - "OutboundEventsHTTPSEndpoint":{ - "shape":"SensitiveString", - "documentation":"

HTTPS endpoint that allows a bot to receive outgoing events.

" - }, - "LambdaFunctionArn":{ - "shape":"SensitiveString", - "documentation":"

Lambda function ARN that allows a bot to receive outgoing events.

" + "documentation":"

The phone number ID.

", + "location":"uri", + "locationName":"phoneNumberId" } - }, - "documentation":"

The configuration that allows a bot to receive outgoing events. Can be either an HTTPS endpoint or a Lambda function ARN.

" - }, - "ExternalMeetingIdType":{ - "type":"string", - "max":64, - "min":2, - "sensitive":true - }, - "ExternalUserIdType":{ - "type":"string", - "max":64, - "min":2, - "sensitive":true + } }, - "ForbiddenException":{ + "GetPhoneNumberResponse":{ "type":"structure", "members":{ - "Code":{"shape":"ErrorCode"}, - "Message":{"shape":"String"} - }, - "documentation":"

The client is permanently forbidden from making the request. For example, when a user tries to create an account from an unsupported Region.

", - "error":{"httpStatusCode":403}, - "exception":true + "PhoneNumber":{ + "shape":"PhoneNumber", + "documentation":"

The phone number details.

" + } + } }, - "GeoMatchLevel":{ - "type":"string", - "enum":[ - "Country", - "AreaCode" - ] + "GetPhoneNumberSettingsResponse":{ + "type":"structure", + "members":{ + "CallingName":{ + "shape":"CallingName", + "documentation":"

The default outbound calling name for the account.

" + }, + "CallingNameUpdatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The updated outbound calling name timestamp, in ISO 8601 format.

" + } + } }, - "GeoMatchParams":{ + "GetProxySessionRequest":{ "type":"structure", "required":[ - "Country", - "AreaCode" + "VoiceConnectorId", + "ProxySessionId" ], "members":{ - "Country":{ - "shape":"Country", - "documentation":"

The country.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" }, - "AreaCode":{ - "shape":"AreaCode", - "documentation":"

The area code.

" + "ProxySessionId":{ + "shape":"NonEmptyString128", + "documentation":"

The proxy session ID.

", + "location":"uri", + "locationName":"proxySessionId" } - }, - "documentation":"

The country and area code for a proxy phone number in a proxy phone session.

" + } + }, + "GetProxySessionResponse":{ + "type":"structure", + "members":{ + "ProxySession":{ + "shape":"ProxySession", + "documentation":"

The proxy session details.

" + } + } }, - "GetAccountRequest":{ + "GetRetentionSettingsRequest":{ "type":"structure", "required":["AccountId"], "members":{ @@ -3956,71 +6915,117 @@ } } }, - "GetAccountResponse":{ + "GetRetentionSettingsResponse":{ "type":"structure", "members":{ - "Account":{ - "shape":"Account", - "documentation":"

The Amazon Chime account details.

" + "RetentionSettings":{ + "shape":"RetentionSettings", + "documentation":"

The retention settings.

" + }, + "InitiateDeletionTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The timestamp representing the time at which the specified items are permanently deleted, in ISO 8601 format.

" } } }, - "GetAccountSettingsRequest":{ + "GetRoomRequest":{ "type":"structure", - "required":["AccountId"], + "required":[ + "AccountId", + "RoomId" + ], "members":{ "AccountId":{ "shape":"NonEmptyString", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "documentation":"

The room ID.

", + "location":"uri", + "locationName":"roomId" } } }, - "GetAccountSettingsResponse":{ + "GetRoomResponse":{ "type":"structure", "members":{ - "AccountSettings":{ - "shape":"AccountSettings", - "documentation":"

The Amazon Chime account settings.

" + "Room":{ + "shape":"Room", + "documentation":"

The room details.

" } } }, - "GetAttendeeRequest":{ + "GetSipMediaApplicationLoggingConfigurationRequest":{ "type":"structure", - "required":[ - "MeetingId", - "AttendeeId" - ], + "required":["SipMediaApplicationId"], "members":{ - "MeetingId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK meeting ID.

", + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the SIP media application.

", "location":"uri", - "locationName":"meetingId" - }, - "AttendeeId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK attendee ID.

", + "locationName":"sipMediaApplicationId" + } + } + }, + "GetSipMediaApplicationLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "SipMediaApplicationLoggingConfiguration":{ + "shape":"SipMediaApplicationLoggingConfiguration", + "documentation":"

The actual logging configuration.

" + } + } + }, + "GetSipMediaApplicationRequest":{ + "type":"structure", + "required":["SipMediaApplicationId"], + "members":{ + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP media application ID.

", "location":"uri", - "locationName":"attendeeId" + "locationName":"sipMediaApplicationId" } } }, - "GetAttendeeResponse":{ + "GetSipMediaApplicationResponse":{ "type":"structure", "members":{ - "Attendee":{ - "shape":"Attendee", - "documentation":"

The Amazon Chime SDK attendee information.

" + "SipMediaApplication":{ + "shape":"SipMediaApplication", + "documentation":"

The SIP media application details.

" } } }, - "GetBotRequest":{ + "GetSipRuleRequest":{ + "type":"structure", + "required":["SipRuleId"], + "members":{ + "SipRuleId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP rule ID.

", + "location":"uri", + "locationName":"sipRuleId" + } + } + }, + "GetSipRuleResponse":{ + "type":"structure", + "members":{ + "SipRule":{ + "shape":"SipRule", + "documentation":"

The SIP rule details.

" + } + } + }, + "GetUserRequest":{ "type":"structure", "required":[ "AccountId", - "BotId" + "UserId" ], "members":{ "AccountId":{ @@ -4029,713 +7034,909 @@ "location":"uri", "locationName":"accountId" }, - "BotId":{ + "UserId":{ "shape":"NonEmptyString", - "documentation":"

The bot ID.

", + "documentation":"

The user ID.

", "location":"uri", - "locationName":"botId" + "locationName":"userId" } } }, - "GetBotResponse":{ + "GetUserResponse":{ "type":"structure", "members":{ - "Bot":{ - "shape":"Bot", - "documentation":"

The chat bot details.

" + "User":{ + "shape":"User", + "documentation":"

The user details.

" } } }, - "GetEventsConfigurationRequest":{ + "GetUserSettingsRequest":{ "type":"structure", "required":[ "AccountId", - "BotId" + "UserId" ], "members":{ "AccountId":{ - "shape":"NonEmptyString", + "shape":"String", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" }, - "BotId":{ - "shape":"NonEmptyString", - "documentation":"

The bot ID.

", + "UserId":{ + "shape":"String", + "documentation":"

The user ID.

", "location":"uri", - "locationName":"botId" + "locationName":"userId" } } }, - "GetEventsConfigurationResponse":{ + "GetUserSettingsResponse":{ "type":"structure", "members":{ - "EventsConfiguration":{ - "shape":"EventsConfiguration", - "documentation":"

The events configuration details.

" + "UserSettings":{ + "shape":"UserSettings", + "documentation":"

The user settings.

" } } }, - "GetGlobalSettingsResponse":{ + "GetVoiceConnectorEmergencyCallingConfigurationRequest":{ "type":"structure", + "required":["VoiceConnectorId"], "members":{ - "BusinessCalling":{ - "shape":"BusinessCallingSettings", - "documentation":"

The Amazon Chime Business Calling settings.

" - }, - "VoiceConnector":{ - "shape":"VoiceConnectorSettings", - "documentation":"

The Amazon Chime Voice Connector settings.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" } } }, - "GetMeetingRequest":{ + "GetVoiceConnectorEmergencyCallingConfigurationResponse":{ "type":"structure", - "required":["MeetingId"], "members":{ - "MeetingId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK meeting ID.

", + "EmergencyCallingConfiguration":{ + "shape":"EmergencyCallingConfiguration", + "documentation":"

The emergency calling configuration details.

" + } + } + }, + "GetVoiceConnectorGroupRequest":{ + "type":"structure", + "required":["VoiceConnectorGroupId"], + "members":{ + "VoiceConnectorGroupId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector group ID.

", "location":"uri", - "locationName":"meetingId" + "locationName":"voiceConnectorGroupId" } } }, - "GetMeetingResponse":{ + "GetVoiceConnectorGroupResponse":{ "type":"structure", "members":{ - "Meeting":{ - "shape":"Meeting", - "documentation":"

The Amazon Chime SDK meeting information.

" + "VoiceConnectorGroup":{ + "shape":"VoiceConnectorGroup", + "documentation":"

The Amazon Chime Voice Connector group details.

" } } }, - "GetPhoneNumberOrderRequest":{ + "GetVoiceConnectorLoggingConfigurationRequest":{ "type":"structure", - "required":["PhoneNumberOrderId"], + "required":["VoiceConnectorId"], "members":{ - "PhoneNumberOrderId":{ - "shape":"GuidString", - "documentation":"

The ID for the phone number order.

", + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", "location":"uri", - "locationName":"phoneNumberOrderId" + "locationName":"voiceConnectorId" } } }, - "GetPhoneNumberOrderResponse":{ + "GetVoiceConnectorLoggingConfigurationResponse":{ "type":"structure", "members":{ - "PhoneNumberOrder":{ - "shape":"PhoneNumberOrder", - "documentation":"

The phone number order details.

" + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The logging configuration details.

" } } }, - "GetPhoneNumberRequest":{ + "GetVoiceConnectorOriginationRequest":{ "type":"structure", - "required":["PhoneNumberId"], + "required":["VoiceConnectorId"], "members":{ - "PhoneNumberId":{ - "shape":"String", - "documentation":"

The phone number ID.

", + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", "location":"uri", - "locationName":"phoneNumberId" + "locationName":"voiceConnectorId" } } }, - "GetPhoneNumberResponse":{ + "GetVoiceConnectorOriginationResponse":{ "type":"structure", "members":{ - "PhoneNumber":{ - "shape":"PhoneNumber", - "documentation":"

The phone number details.

" + "Origination":{ + "shape":"Origination", + "documentation":"

The origination setting details.

" } } }, - "GetPhoneNumberSettingsResponse":{ + "GetVoiceConnectorProxyRequest":{ "type":"structure", + "required":["VoiceConnectorId"], "members":{ - "CallingName":{ - "shape":"CallingName", - "documentation":"

The default outbound calling name for the account.

" - }, - "CallingNameUpdatedTimestamp":{ - "shape":"Iso8601Timestamp", - "documentation":"

The updated outbound calling name timestamp, in ISO 8601 format.

" + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" } } }, - "GetProxySessionRequest":{ + "GetVoiceConnectorProxyResponse":{ "type":"structure", - "required":[ - "VoiceConnectorId", - "ProxySessionId" - ], + "members":{ + "Proxy":{ + "shape":"Proxy", + "documentation":"

The proxy configuration details.

" + } + } + }, + "GetVoiceConnectorRequest":{ + "type":"structure", + "required":["VoiceConnectorId"], "members":{ "VoiceConnectorId":{ - "shape":"NonEmptyString128", - "documentation":"

The Amazon Chime voice connector ID.

", + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", "location":"uri", "locationName":"voiceConnectorId" - }, - "ProxySessionId":{ - "shape":"NonEmptyString128", - "documentation":"

The proxy session ID.

", + } + } + }, + "GetVoiceConnectorResponse":{ + "type":"structure", + "members":{ + "VoiceConnector":{ + "shape":"VoiceConnector", + "documentation":"

The Amazon Chime Voice Connector details.

" + } + } + }, + "GetVoiceConnectorStreamingConfigurationRequest":{ + "type":"structure", + "required":["VoiceConnectorId"], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", "location":"uri", - "locationName":"proxySessionId" + "locationName":"voiceConnectorId" } } }, - "GetProxySessionResponse":{ + "GetVoiceConnectorStreamingConfigurationResponse":{ "type":"structure", "members":{ - "ProxySession":{ - "shape":"ProxySession", - "documentation":"

The proxy session details.

" + "StreamingConfiguration":{ + "shape":"StreamingConfiguration", + "documentation":"

The streaming configuration details.

" } } }, - "GetRetentionSettingsRequest":{ + "GetVoiceConnectorTerminationHealthRequest":{ "type":"structure", - "required":["AccountId"], + "required":["VoiceConnectorId"], "members":{ - "AccountId":{ + "VoiceConnectorId":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", + "documentation":"

The Amazon Chime Voice Connector ID.

", "location":"uri", - "locationName":"accountId" + "locationName":"voiceConnectorId" } } }, - "GetRetentionSettingsResponse":{ + "GetVoiceConnectorTerminationHealthResponse":{ "type":"structure", "members":{ - "RetentionSettings":{ - "shape":"RetentionSettings", - "documentation":"

The retention settings.

" - }, - "InitiateDeletionTimestamp":{ - "shape":"Iso8601Timestamp", - "documentation":"

The timestamp representing the time at which the specified items are permanently deleted, in ISO 8601 format.

" + "TerminationHealth":{ + "shape":"TerminationHealth", + "documentation":"

The termination health details.

" } } }, - "GetRoomRequest":{ + "GetVoiceConnectorTerminationRequest":{ "type":"structure", - "required":[ - "AccountId", - "RoomId" - ], + "required":["VoiceConnectorId"], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" - }, - "RoomId":{ + "VoiceConnectorId":{ "shape":"NonEmptyString", - "documentation":"

The room ID.

", + "documentation":"

The Amazon Chime Voice Connector ID.

", "location":"uri", - "locationName":"roomId" + "locationName":"voiceConnectorId" } } }, - "GetRoomResponse":{ + "GetVoiceConnectorTerminationResponse":{ "type":"structure", "members":{ - "Room":{ - "shape":"Room", - "documentation":"

The room details.

" + "Termination":{ + "shape":"Termination", + "documentation":"

The termination setting details.

" } } }, - "GetUserRequest":{ + "GuidString":{ + "type":"string", + "pattern":"[a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}" + }, + "Identity":{ "type":"structure", - "required":[ - "AccountId", - "UserId" - ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" + "Arn":{ + "shape":"ChimeArn", + "documentation":"

The ARN in an Identity.

" }, - "UserId":{ - "shape":"NonEmptyString", - "documentation":"

The user ID.

", - "location":"uri", - "locationName":"userId" + "Name":{ + "shape":"ResourceName", + "documentation":"

The name in an Identity.

" } - } + }, + "documentation":"

The ARN and name of a user.

" }, - "GetUserResponse":{ + "Integer":{"type":"integer"}, + "Invite":{ "type":"structure", "members":{ - "User":{ - "shape":"User", - "documentation":"

The user details.

" + "InviteId":{ + "shape":"String", + "documentation":"

The invite ID.

" + }, + "Status":{ + "shape":"InviteStatus", + "documentation":"

The status of the invite.

" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The email address to which the invite is sent.

" + }, + "EmailStatus":{ + "shape":"EmailStatus", + "documentation":"

The status of the invite email.

" } - } + }, + "documentation":"

Invitation object returned after emailing users to invite them to join the Amazon Chime Team account.

" }, - "GetUserSettingsRequest":{ + "InviteList":{ + "type":"list", + "member":{"shape":"Invite"} + }, + "InviteStatus":{ + "type":"string", + "enum":[ + "Pending", + "Accepted", + "Failed" + ] + }, + "InviteUsersRequest":{ "type":"structure", "required":[ "AccountId", - "UserId" + "UserEmailList" ], "members":{ "AccountId":{ - "shape":"String", + "shape":"NonEmptyString", "documentation":"

The Amazon Chime account ID.

", "location":"uri", "locationName":"accountId" }, - "UserId":{ - "shape":"String", - "documentation":"

The user ID.

", - "location":"uri", - "locationName":"userId" + "UserEmailList":{ + "shape":"UserEmailList", + "documentation":"

The user email addresses to which to send the email invitation.

" + }, + "UserType":{ + "shape":"UserType", + "documentation":"

The user type.

" } } }, - "GetUserSettingsResponse":{ + "InviteUsersResponse":{ "type":"structure", "members":{ - "UserSettings":{ - "shape":"UserSettings", - "documentation":"

The user settings.

" + "Invites":{ + "shape":"InviteList", + "documentation":"

The email invitation details.

" } } }, - "GetVoiceConnectorEmergencyCallingConfigurationRequest":{ - "type":"structure", - "required":["VoiceConnectorId"], - "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" - } - } + "Iso8601Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" }, - "GetVoiceConnectorEmergencyCallingConfigurationResponse":{ - "type":"structure", - "members":{ - "EmergencyCallingConfiguration":{ - "shape":"EmergencyCallingConfiguration", - "documentation":"

The emergency calling configuration details.

" - } - } + "JoinTokenString":{ + "type":"string", + "max":2048, + "min":2, + "pattern":"^[a-zA-Z0-9+/]+$", + "sensitive":true }, - "GetVoiceConnectorGroupRequest":{ + "License":{ + "type":"string", + "enum":[ + "Basic", + "Plus", + "Pro", + "ProTrial" + ] + }, + "LicenseList":{ + "type":"list", + "member":{"shape":"License"} + }, + "ListAccountsRequest":{ "type":"structure", - "required":["VoiceConnectorGroupId"], "members":{ - "VoiceConnectorGroupId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector group ID.

", - "location":"uri", - "locationName":"voiceConnectorGroupId" + "Name":{ + "shape":"AccountName", + "documentation":"

Amazon Chime account name prefix with which to filter results.

", + "location":"querystring", + "locationName":"name" + }, + "UserEmail":{ + "shape":"EmailAddress", + "documentation":"

User email address with which to filter results.

", + "location":"querystring", + "locationName":"user-email" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"ProfileServiceMaxResults", + "documentation":"

The maximum number of results to return in a single call. Defaults to 100.

", + "location":"querystring", + "locationName":"max-results" } } }, - "GetVoiceConnectorGroupResponse":{ + "ListAccountsResponse":{ "type":"structure", "members":{ - "VoiceConnectorGroup":{ - "shape":"VoiceConnectorGroup", - "documentation":"

The Amazon Chime Voice Connector group details.

" + "Accounts":{ + "shape":"AccountList", + "documentation":"

The list of accounts.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results.

" } } }, - "GetVoiceConnectorLoggingConfigurationRequest":{ + "ListAppInstanceAdminsRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":["AppInstanceArn"], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", "location":"uri", - "locationName":"voiceConnectorId" + "locationName":"appInstanceArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of administrators that you want to return.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of administrators is reached.

", + "location":"querystring", + "locationName":"next-token" } } }, - "GetVoiceConnectorLoggingConfigurationResponse":{ + "ListAppInstanceAdminsResponse":{ "type":"structure", "members":{ - "LoggingConfiguration":{ - "shape":"LoggingConfiguration", - "documentation":"

The logging configuration details.

" + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

" + }, + "AppInstanceAdmins":{ + "shape":"AppInstanceAdminList", + "documentation":"

The information for each administrator.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of administrators is reached.

" } } - }, - "GetVoiceConnectorOriginationRequest":{ - "type":"structure", - "required":["VoiceConnectorId"], - "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" + }, + "ListAppInstanceUsersRequest":{ + "type":"structure", + "required":["AppInstanceArn"], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"querystring", + "locationName":"app-instance-arn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of requests that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested users are returned.

", + "location":"querystring", + "locationName":"next-token" } } }, - "GetVoiceConnectorOriginationResponse":{ + "ListAppInstanceUsersResponse":{ "type":"structure", "members":{ - "Origination":{ - "shape":"Origination", - "documentation":"

The origination setting details.

" + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

" + }, + "AppInstanceUsers":{ + "shape":"AppInstanceUserList", + "documentation":"

The information for each of the requested app instance users.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested users are returned.

" } } }, - "GetVoiceConnectorProxyRequest":{ + "ListAppInstancesRequest":{ "type":"structure", - "required":["VoiceConnectorId"], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString128", - "documentation":"

The Amazon Chime voice connector ID.

", - "location":"uri", - "locationName":"voiceConnectorId" + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of app instances that you want to return.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API requests until you reach the maximum number of app instances.

", + "location":"querystring", + "locationName":"next-token" } } }, - "GetVoiceConnectorProxyResponse":{ + "ListAppInstancesResponse":{ "type":"structure", "members":{ - "Proxy":{ - "shape":"Proxy", - "documentation":"

The proxy configuration details.

" + "AppInstances":{ + "shape":"AppInstanceList", + "documentation":"

The information for each app instance.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API requests until the maximum number of app instances is reached.

" } } }, - "GetVoiceConnectorRequest":{ + "ListAttendeeTagsRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":[ + "MeetingId", + "AttendeeId" + ], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", "location":"uri", - "locationName":"voiceConnectorId" + "locationName":"meetingId" + }, + "AttendeeId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK attendee ID.

", + "location":"uri", + "locationName":"attendeeId" } } }, - "GetVoiceConnectorResponse":{ + "ListAttendeeTagsResponse":{ "type":"structure", "members":{ - "VoiceConnector":{ - "shape":"VoiceConnector", - "documentation":"

The Amazon Chime Voice Connector details.

" + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key-value pairs.

" } } }, - "GetVoiceConnectorStreamingConfigurationRequest":{ + "ListAttendeesRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":["MeetingId"], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", "location":"uri", - "locationName":"voiceConnectorId" + "locationName":"meetingId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"ResultMax", + "documentation":"

The maximum number of results to return in a single call.

", + "location":"querystring", + "locationName":"max-results" } } }, - "GetVoiceConnectorStreamingConfigurationResponse":{ + "ListAttendeesResponse":{ "type":"structure", "members":{ - "StreamingConfiguration":{ - "shape":"StreamingConfiguration", - "documentation":"

The streaming configuration details.

" + "Attendees":{ + "shape":"AttendeeList", + "documentation":"

The Amazon Chime SDK attendee information.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results.

" } } }, - "GetVoiceConnectorTerminationHealthRequest":{ + "ListBotsRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":["AccountId"], "members":{ - "VoiceConnectorId":{ + "AccountId":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "documentation":"

The Amazon Chime account ID.

", "location":"uri", - "locationName":"voiceConnectorId" + "locationName":"accountId" + }, + "MaxResults":{ + "shape":"ResultMax", + "documentation":"

The maximum number of results to return in a single call. The default is 10.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results.

", + "location":"querystring", + "locationName":"next-token" } } }, - "GetVoiceConnectorTerminationHealthResponse":{ + "ListBotsResponse":{ "type":"structure", "members":{ - "TerminationHealth":{ - "shape":"TerminationHealth", - "documentation":"

The termination health details.

" + "Bots":{ + "shape":"BotList", + "documentation":"

List of bots and bot details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results.

" } } }, - "GetVoiceConnectorTerminationRequest":{ + "ListChannelBansRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":["ChannelArn"], "members":{ - "VoiceConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime Voice Connector ID.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", "location":"uri", - "locationName":"voiceConnectorId" + "locationName":"channelArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of bans that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested bans are returned.

", + "location":"querystring", + "locationName":"next-token" } } }, - "GetVoiceConnectorTerminationResponse":{ + "ListChannelBansResponse":{ "type":"structure", "members":{ - "Termination":{ - "shape":"Termination", - "documentation":"

The termination setting details.

" + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested bans are returned.

" + }, + "ChannelBans":{ + "shape":"ChannelBanSummaryList", + "documentation":"

The information for each requested ban.

" } } }, - "GuidString":{ - "type":"string", - "pattern":"[a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}" - }, - "Integer":{"type":"integer"}, - "Invite":{ + "ListChannelMembershipsForAppInstanceUserRequest":{ "type":"structure", "members":{ - "InviteId":{ - "shape":"String", - "documentation":"

The invite ID.

" - }, - "Status":{ - "shape":"InviteStatus", - "documentation":"

The status of the invite.

" + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user.

", + "location":"querystring", + "locationName":"app-instance-user-arn" }, - "EmailAddress":{ - "shape":"EmailAddress", - "documentation":"

The email address to which the invite is sent.

" + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of users that you want returned.

", + "location":"querystring", + "locationName":"max-results" }, - "EmailStatus":{ - "shape":"EmailStatus", - "documentation":"

The status of the invite email.

" + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channel memberships is reached.

", + "location":"querystring", + "locationName":"next-token" } - }, - "documentation":"

Invitation object returned after emailing users to invite them to join the Amazon Chime Team account.

" - }, - "InviteList":{ - "type":"list", - "member":{"shape":"Invite"} - }, - "InviteStatus":{ - "type":"string", - "enum":[ - "Pending", - "Accepted", - "Failed" - ] + } }, - "InviteUsersRequest":{ + "ListChannelMembershipsForAppInstanceUserResponse":{ "type":"structure", - "required":[ - "AccountId", - "UserEmailList" - ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" - }, - "UserEmailList":{ - "shape":"UserEmailList", - "documentation":"

The user email addresses to which to send the email invitation.

" + "ChannelMemberships":{ + "shape":"ChannelMembershipForAppInstanceUserSummaryList", + "documentation":"

The information for the requested channel memberships.

" }, - "UserType":{ - "shape":"UserType", - "documentation":"

The user type.

" + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested users are returned.

" } } }, - "InviteUsersResponse":{ + "ListChannelMembershipsRequest":{ "type":"structure", + "required":["ChannelArn"], "members":{ - "Invites":{ - "shape":"InviteList", - "documentation":"

The email invitation details.

" + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "Type":{ + "shape":"ChannelMembershipType", + "documentation":"

The membership type of a user, DEFAULT or HIDDEN. Default members are always returned as part of ListChannelMemberships. Hidden members are only returned if the type filter in ListChannelMemberships equals HIDDEN. Otherwise hidden members are not returned.

", + "location":"querystring", + "locationName":"type" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of channel memberships that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested channel memberships are returned.

", + "location":"querystring", + "locationName":"next-token" } } }, - "Iso8601Timestamp":{ - "type":"timestamp", - "timestampFormat":"iso8601" - }, - "JoinTokenString":{ - "type":"string", - "max":2048, - "min":2, - "sensitive":true - }, - "License":{ - "type":"string", - "enum":[ - "Basic", - "Plus", - "Pro", - "ProTrial" - ] - }, - "LicenseList":{ - "type":"list", - "member":{"shape":"License"} + "ListChannelMembershipsResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "ChannelMemberships":{ + "shape":"ChannelMembershipSummaryList", + "documentation":"

The information for the requested channel memberships.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested channel memberships are returned.

" + } + } }, - "ListAccountsRequest":{ + "ListChannelMessagesRequest":{ "type":"structure", + "required":["ChannelArn"], "members":{ - "Name":{ - "shape":"AccountName", - "documentation":"

Amazon Chime account name prefix with which to filter results.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The order in which you want messages sorted. Default is Descending, based on time created.

", "location":"querystring", - "locationName":"name" + "locationName":"sort-order" }, - "UserEmail":{ - "shape":"EmailAddress", - "documentation":"

User email address with which to filter results.

", + "NotBefore":{ + "shape":"Timestamp", + "documentation":"

The initial or starting time stamp for your requested messages.

", "location":"querystring", - "locationName":"user-email" + "locationName":"not-before" }, - "NextToken":{ - "shape":"String", - "documentation":"

The token to use to retrieve the next page of results.

", + "NotAfter":{ + "shape":"Timestamp", + "documentation":"

The final or ending time stamp for your requested messages.

", "location":"querystring", - "locationName":"next-token" + "locationName":"not-after" }, "MaxResults":{ - "shape":"ProfileServiceMaxResults", - "documentation":"

The maximum number of results to return in a single call. Defaults to 100.

", + "shape":"MaxResults", + "documentation":"

The maximum number of messages that you want returned.

", "location":"querystring", "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested messages are returned.

", + "location":"querystring", + "locationName":"next-token" } } }, - "ListAccountsResponse":{ + "ListChannelMessagesResponse":{ "type":"structure", "members":{ - "Accounts":{ - "shape":"AccountList", - "documentation":"

List of Amazon Chime accounts and account details.

" + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel containing the requested messages.

" }, "NextToken":{ - "shape":"String", - "documentation":"

The token to use to retrieve the next page of results.

" + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested messages are returned.

" + }, + "ChannelMessages":{ + "shape":"ChannelMessageSummaryList", + "documentation":"

The information about and content of each requested message.

" } } }, - "ListAttendeeTagsRequest":{ + "ListChannelModeratorsRequest":{ "type":"structure", - "required":[ - "MeetingId", - "AttendeeId" - ], + "required":["ChannelArn"], "members":{ - "MeetingId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK meeting ID.

", + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", "location":"uri", - "locationName":"meetingId" + "locationName":"channelArn" }, - "AttendeeId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK attendee ID.

", - "location":"uri", - "locationName":"attendeeId" + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of moderators that you want returned.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested moderators are returned.

", + "location":"querystring", + "locationName":"next-token" } } }, - "ListAttendeeTagsResponse":{ + "ListChannelModeratorsResponse":{ "type":"structure", "members":{ - "Tags":{ - "shape":"TagList", - "documentation":"

A list of tag key-value pairs.

" + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested moderators are returned.

" + }, + "ChannelModerators":{ + "shape":"ChannelModeratorSummaryList", + "documentation":"

The information about and names of each moderator.

" } } }, - "ListAttendeesRequest":{ + "ListChannelsModeratedByAppInstanceUserRequest":{ "type":"structure", - "required":["MeetingId"], "members":{ - "MeetingId":{ - "shape":"GuidString", - "documentation":"

The Amazon Chime SDK meeting ID.

", - "location":"uri", - "locationName":"meetingId" - }, - "NextToken":{ - "shape":"String", - "documentation":"

The token to use to retrieve the next page of results.

", + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the user in the moderated channel.

", "location":"querystring", - "locationName":"next-token" + "locationName":"app-instance-user-arn" }, "MaxResults":{ - "shape":"ResultMax", - "documentation":"

The maximum number of results to return in a single call.

", + "shape":"MaxResults", + "documentation":"

The maximum number of channels in the request.

", "location":"querystring", "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channels moderated by the user is reached.

", + "location":"querystring", + "locationName":"next-token" } } }, - "ListAttendeesResponse":{ + "ListChannelsModeratedByAppInstanceUserResponse":{ "type":"structure", "members":{ - "Attendees":{ - "shape":"AttendeeList", - "documentation":"

The Amazon Chime SDK attendee information.

" + "Channels":{ + "shape":"ChannelModeratedByAppInstanceUserSummaryList", + "documentation":"

The moderated channels in the request.

" }, "NextToken":{ - "shape":"String", - "documentation":"

The token to use to retrieve the next page of results.

" + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channels moderated by the user is reached.

" } } }, - "ListBotsRequest":{ + "ListChannelsRequest":{ "type":"structure", - "required":["AccountId"], + "required":["AppInstanceArn"], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"querystring", + "locationName":"app-instance-arn" + }, + "Privacy":{ + "shape":"ChannelPrivacy", + "documentation":"

The privacy setting. PUBLIC retrieves all the public channels. PRIVATE retrieves private channels. Only an app instance administrator can retrieve private channels.

", + "location":"querystring", + "locationName":"privacy" }, "MaxResults":{ - "shape":"ResultMax", - "documentation":"

The maximum number of results to return in a single call. The default is 10.

", + "shape":"MaxResults", + "documentation":"

The maximum number of channels that you want to return.

", "location":"querystring", "locationName":"max-results" }, "NextToken":{ - "shape":"String", - "documentation":"

The token to use to retrieve the next page of results.

", + "shape":"NextToken", + "documentation":"

The token passed by previous API calls until all requested channels are returned.

", "location":"querystring", "locationName":"next-token" } } }, - "ListBotsResponse":{ + "ListChannelsResponse":{ "type":"structure", "members":{ - "Bots":{ - "shape":"BotList", - "documentation":"

List of bots and bot details.

" + "Channels":{ + "shape":"ChannelSummaryList", + "documentation":"

The information about each channel.

" }, "NextToken":{ - "shape":"String", - "documentation":"

The token to use to retrieve the next page of results.

" + "shape":"NextToken", + "documentation":"

The token returned from previous API requests until the number of channels is reached.

" } } }, @@ -5006,6 +8207,72 @@ } } }, + "ListSipMediaApplicationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ResultMax", + "documentation":"

The maximum number of results to return in a single call. Defaults to 100.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

The token to use to retrieve the next page of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListSipMediaApplicationsResponse":{ + "type":"structure", + "members":{ + "SipMediaApplications":{ + "shape":"SipMediaApplicationList", + "documentation":"

List of SIP media applications and application details.

" + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

The token to use to retrieve the next page of results.

" + } + } + }, + "ListSipRulesRequest":{ + "type":"structure", + "members":{ + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP media application ID.

", + "location":"querystring", + "locationName":"sip-media-application" + }, + "MaxResults":{ + "shape":"ResultMax", + "documentation":"

The maximum number of results to return in a single call. Defaults to 100.

", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

The token to use to retrieve the next page of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListSipRulesResponse":{ + "type":"structure", + "members":{ + "SipRules":{ + "shape":"SipRuleList", + "documentation":"

List of SIP rules and rule details.

" + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

The token to use to retrieve the next page of results.

" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceARN"], @@ -5193,6 +8460,11 @@ "members":{ } }, + "MaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, "MediaPlacement":{ "type":"structure", "members":{ @@ -5354,10 +8626,54 @@ "member":{"shape":"MembershipItem"}, "max":50 }, + "MessageId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[-_a-zA-Z0-9]*" + }, + "MessagingSessionEndpoint":{ + "type":"structure", + "members":{ + "Url":{ + "shape":"UrlType", + "documentation":"

The URL of a messaging session endpoint.

" + } + }, + "documentation":"

The endpoint of a messaging session.

" + }, + "Metadata":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*", + "sensitive":true + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*", + "sensitive":true + }, "NextTokenString":{ "type":"string", "max":65535 }, + "NonEmptyContent":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[\\s\\S]*", + "sensitive":true + }, + "NonEmptyResourceName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*", + "sensitive":true + }, "NonEmptyString":{ "type":"string", "pattern":".*\\S.*" @@ -5373,6 +8689,7 @@ "member":{"shape":"String"}, "min":1 }, + "NonNullableBoolean":{"type":"boolean"}, "NotFoundException":{ "type":"structure", "members":{ @@ -5592,7 +8909,8 @@ "AccountId", "UserId", "VoiceConnectorId", - "VoiceConnectorGroupId" + "VoiceConnectorGroupId", + "SipRuleId" ] }, "PhoneNumberCapabilities":{ @@ -5837,6 +9155,66 @@ "type":"list", "member":{"shape":"ProxySession"} }, + "PutAppInstanceRetentionSettingsRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "AppInstanceRetentionSettings" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" + }, + "AppInstanceRetentionSettings":{ + "shape":"AppInstanceRetentionSettings", + "documentation":"

The time in days to retain data. Data type: number.

" + } + } + }, + "PutAppInstanceRetentionSettingsResponse":{ + "type":"structure", + "members":{ + "AppInstanceRetentionSettings":{ + "shape":"AppInstanceRetentionSettings", + "documentation":"

The time in days to retain data. Data type: number.

" + }, + "InitiateDeletionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time at which the API deletes data.

" + } + } + }, + "PutAppInstanceStreamingConfigurationsRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "AppInstanceStreamingConfigurations" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" + }, + "AppInstanceStreamingConfigurations":{ + "shape":"AppInstanceStreamingConfigurationList", + "documentation":"

The streaming configurations set for an app instance.

" + } + } + }, + "PutAppInstanceStreamingConfigurationsResponse":{ + "type":"structure", + "members":{ + "AppInstanceStreamingConfigurations":{ + "shape":"AppInstanceStreamingConfigurationList", + "documentation":"

The streaming configurations of an app instance.

" + } + } + }, "PutEventsConfigurationRequest":{ "type":"structure", "required":[ @@ -5904,6 +9282,31 @@ } } }, + "PutSipMediaApplicationLoggingConfigurationRequest":{ + "type":"structure", + "required":["SipMediaApplicationId"], + "members":{ + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the specified SIP media application.

", + "location":"uri", + "locationName":"sipMediaApplicationId" + }, + "SipMediaApplicationLoggingConfiguration":{ + "shape":"SipMediaApplicationLoggingConfiguration", + "documentation":"

The actual logging configuration.

" + } + } + }, + "PutSipMediaApplicationLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "SipMediaApplicationLoggingConfiguration":{ + "shape":"SipMediaApplicationLoggingConfiguration", + "documentation":"

The actual logging configuration.

" + } + } + }, "PutVoiceConnectorEmergencyCallingConfigurationRequest":{ "type":"structure", "required":[ @@ -6101,6 +9504,40 @@ } } }, + "RedactChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MessageId" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel containing the messages that you want to redact.

", + "location":"uri", + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message being redacted.

", + "location":"uri", + "locationName":"messageId" + } + } + }, + "RedactChannelMessageResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel containing the messages that you want to redact.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID of the message being redacted.

" + } + } + }, "RedactConversationMessageRequest":{ "type":"structure", "required":[ @@ -6242,6 +9679,13 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ResourceName":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*", + "sensitive":true + }, "RestorePhoneNumberRequest":{ "type":"structure", "required":["PhoneNumberId"], @@ -6421,6 +9865,58 @@ } } }, + "SendChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "Content", + "Type", + "Persistence", + "ClientRequestToken" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "Content":{ + "shape":"NonEmptyContent", + "documentation":"

The content of the message.

" + }, + "Type":{ + "shape":"ChannelMessageType", + "documentation":"

The type of message, STANDARD or CONTROL.

" + }, + "Persistence":{ + "shape":"ChannelMessagePersistenceType", + "documentation":"

Controls whether the message is persisted on the back end. Required.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The optional metadata for each message.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The idempotency token for each client request.

", + "idempotencyToken":true + } + } + }, + "SendChannelMessageResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID string assigned to each message.

" + } + } + }, "SensitiveString":{ "type":"string", "sensitive":true @@ -6465,6 +9961,170 @@ "type":"list", "member":{"shape":"SigninDelegateGroup"} }, + "SipApplicationPriority":{ + "type":"integer", + "min":1 + }, + "SipMediaApplication":{ + "type":"structure", + "members":{ + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP media application ID.

" + }, + "AwsRegion":{ + "shape":"String", + "documentation":"

The AWS Region in which the SIP media application is created.

" + }, + "Name":{ + "shape":"SipMediaApplicationName", + "documentation":"

The name of the SIP media application.

" + }, + "Endpoints":{ + "shape":"SipMediaApplicationEndpointList", + "documentation":"

List of endpoints for the SIP media application. Currently, only one endpoint per SIP media application is permitted.

" + }, + "CreatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The SIP media application creation timestamp, in ISO 8601 format.

" + }, + "UpdatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The SIP media application updated timestamp, in ISO 8601 format.

" + } + }, + "documentation":"

The SIP media application details, including name and endpoints. An AWS account can have multiple SIP media applications.

" + }, + "SipMediaApplicationCall":{ + "type":"structure", + "members":{ + "TransactionId":{ + "shape":"GuidString", + "documentation":"

The transaction ID of a call.

" + } + }, + "documentation":"

A Call instance for a SIP media application.

" + }, + "SipMediaApplicationEndpoint":{ + "type":"structure", + "members":{ + "LambdaArn":{ + "shape":"FunctionArn", + "documentation":"

Valid Amazon Resource Name (ARN) of the Lambda function, in the same AWS Region where the SIP media application is created.

" + } + }, + "documentation":"

Endpoints to specify as part of a SIP media application.

" + }, + "SipMediaApplicationEndpointList":{ + "type":"list", + "member":{"shape":"SipMediaApplicationEndpoint"}, + "max":1, + "min":1 + }, + "SipMediaApplicationList":{ + "type":"list", + "member":{"shape":"SipMediaApplication"} + }, + "SipMediaApplicationLoggingConfiguration":{ + "type":"structure", + "members":{ + "EnableSipMediaApplicationMessageLogs":{ + "shape":"Boolean", + "documentation":"

Enables application message logs for the SIP media application.

" + } + }, + "documentation":"

Logging configuration of the SIP media application.

" + }, + "SipMediaApplicationName":{ + "type":"string", + "max":256, + "min":1 + }, + "SipRule":{ + "type":"structure", + "members":{ + "SipRuleId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP rule ID.

" + }, + "Name":{ + "shape":"SipRuleName", + "documentation":"

The name of the SIP rule.

" + }, + "Disabled":{ + "shape":"Boolean", + "documentation":"

Indicates if the SIP rule is enabled or disabled. You must disable a rule before you can delete it.

" + }, + "TriggerType":{ + "shape":"SipRuleTriggerType", + "documentation":"

The type of trigger whose value is assigned to the SIP rule in TriggerValue.

" + }, + "TriggerValue":{ + "shape":"NonEmptyString", + "documentation":"

If TriggerType is RequestUriHostname, the value can be the outbound host name of the Amazon Chime Voice Connector. If TriggerType is ToPhoneNumber, the value can be a customer-owned phone number in E.164 format. The SIP rule is triggered when the request host name or To phone number in an incoming SIP request matches this value.

" + }, + "TargetApplications":{ + "shape":"SipRuleTargetApplicationList", + "documentation":"

List of SIP media applications with priority and AWS Region. You can only use one SIP application per AWS Region and priority combination.

" + }, + "CreatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The SIP rule created timestamp, in ISO 8601 format.

" + }, + "UpdatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The SIP rule updated timestamp, in ISO 8601 format.

" + } + }, + "documentation":"

The SIP rule details, including name, triggers, and target applications. An AWS account can have multiple SIP rules.

" + }, + "SipRuleList":{ + "type":"list", + "member":{"shape":"SipRule"} + }, + "SipRuleName":{ + "type":"string", + "max":256, + "min":1 + }, + "SipRuleTargetApplication":{ + "type":"structure", + "members":{ + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP media application ID.

" + }, + "Priority":{ + "shape":"SipApplicationPriority", + "documentation":"

Priority of the SIP media application in the target list.

" + }, + "AwsRegion":{ + "shape":"String", + "documentation":"

The AWS Region of the target application.

" + } + }, + "documentation":"

The target SIP media application, along with other details such as priority and AWS Region, to be specified in the SIP rule. Only one SIP rule per AWS Region can be provided.

" + }, + "SipRuleTargetApplicationList":{ + "type":"list", + "member":{"shape":"SipRuleTargetApplication"}, + "max":25, + "min":1 + }, + "SipRuleTriggerType":{ + "type":"string", + "enum":[ + "ToPhoneNumber", + "RequestUriHostname" + ] + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, "StreamingConfiguration":{ "type":"structure", "required":["DataRetentionInHours"], @@ -6687,6 +10347,7 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Timestamp":{"type":"timestamp"}, "TollFreePrefix":{ "type":"string", "max":3, @@ -6794,10 +10455,7 @@ "UpdateAccountResponse":{ "type":"structure", "members":{ - "Account":{ - "shape":"Account", - "documentation":"

The updated Amazon Chime account details.

" - } + "Account":{"shape":"Account"} } }, "UpdateAccountSettingsRequest":{ @@ -6824,6 +10482,70 @@ "members":{ } }, + "UpdateAppInstanceRequest":{ + "type":"structure", + "required":[ + "AppInstanceArn", + "Name" + ], + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

", + "location":"uri", + "locationName":"appInstanceArn" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name that you want to change.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata that you want to change.

" + } + } + }, + "UpdateAppInstanceResponse":{ + "type":"structure", + "members":{ + "AppInstanceArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance.

" + } + } + }, + "UpdateAppInstanceUserRequest":{ + "type":"structure", + "required":[ + "AppInstanceUserArn", + "Name" + ], + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user.

", + "location":"uri", + "locationName":"appInstanceUserArn" + }, + "Name":{ + "shape":"UserName", + "documentation":"

The name of the app instance user.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the app instance user.

" + } + } + }, + "UpdateAppInstanceUserResponse":{ + "type":"structure", + "members":{ + "AppInstanceUserArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the app instance user.

" + } + } + }, "UpdateBotRequest":{ "type":"structure", "required":[ @@ -6858,6 +10580,106 @@ } } }, + "UpdateChannelMessageRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "MessageId" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID string of the message being updated.

", + "location":"uri", + "locationName":"messageId" + }, + "Content":{ + "shape":"Content", + "documentation":"

The content of the message being updated.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the message being updated.

" + } + } + }, + "UpdateChannelMessageResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + }, + "MessageId":{ + "shape":"MessageId", + "documentation":"

The ID string of the message being updated.

" + } + } + }, + "UpdateChannelReadMarkerRequest":{ + "type":"structure", + "required":["ChannelArn"], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + } + } + }, + "UpdateChannelReadMarkerResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + } + } + }, + "UpdateChannelRequest":{ + "type":"structure", + "required":[ + "ChannelArn", + "Name", + "Mode" + ], + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

", + "location":"uri", + "locationName":"channelArn" + }, + "Name":{ + "shape":"NonEmptyResourceName", + "documentation":"

The name of the channel.

" + }, + "Mode":{ + "shape":"ChannelMode", + "documentation":"

The mode of the update request.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata of the channel.

" + } + } + }, + "UpdateChannelResponse":{ + "type":"structure", + "members":{ + "ChannelArn":{ + "shape":"ChimeArn", + "documentation":"

The ARN of the channel.

" + } + } + }, "UpdateGlobalSettingsRequest":{ "type":"structure", "required":[ @@ -7051,6 +10873,71 @@ } } }, + "UpdateSipMediaApplicationRequest":{ + "type":"structure", + "required":["SipMediaApplicationId"], + "members":{ + "SipMediaApplicationId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP media application ID.

", + "location":"uri", + "locationName":"sipMediaApplicationId" + }, + "Name":{ + "shape":"SipMediaApplicationName", + "documentation":"

The new name for the specified SIP media application.

" + }, + "Endpoints":{ + "shape":"SipMediaApplicationEndpointList", + "documentation":"

The new set of endpoints for the specified SIP media application.

" + } + } + }, + "UpdateSipMediaApplicationResponse":{ + "type":"structure", + "members":{ + "SipMediaApplication":{ + "shape":"SipMediaApplication", + "documentation":"

The updated SIP media application details.

" + } + } + }, + "UpdateSipRuleRequest":{ + "type":"structure", + "required":[ + "SipRuleId", + "Name" + ], + "members":{ + "SipRuleId":{ + "shape":"NonEmptyString", + "documentation":"

The SIP rule ID.

", + "location":"uri", + "locationName":"sipRuleId" + }, + "Name":{ + "shape":"SipRuleName", + "documentation":"

The new name for the specified SIP rule.

" + }, + "Disabled":{ + "shape":"NullableBoolean", + "documentation":"

The new value specified to indicate whether the rule is disabled.

" + }, + "TargetApplications":{ + "shape":"SipRuleTargetApplicationList", + "documentation":"

The new value of the list of target applications.

" + } + } + }, + "UpdateSipRuleResponse":{ + "type":"structure", + "members":{ + "SipRule":{ + "shape":"SipRule", + "documentation":"

Updated SIP rule details.

" + } + } + }, "UpdateUserRequest":{ "type":"structure", "required":[ @@ -7217,6 +11104,10 @@ "type":"string", "max":4096 }, + "UrlType":{ + "type":"string", + "max":4096 + }, "User":{ "type":"structure", "required":["UserId"], @@ -7303,6 +11194,13 @@ "type":"list", "member":{"shape":"UserError"} }, + "UserId":{ + "type":"string", + "max":50, + "min":1, + "pattern":"[A-Za-z0-9][A-Za-z0-9\\:\\-\\_\\.\\@]{3,50}[A-Za-z0-9]", + "sensitive":true + }, "UserIdList":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -7312,6 +11210,13 @@ "type":"list", "member":{"shape":"User"} }, + "UserName":{ + "type":"string", + "max":100, + "min":1, + "pattern":".*\\S.*", + "sensitive":true + }, "UserSettings":{ "type":"structure", "required":["Telephony"], @@ -7454,5 +11359,5 @@ "documentation":"

The Amazon Chime Voice Connector settings. Includes any Amazon S3 buckets designated for storing call detail records.

" } }, - "documentation":"

The Amazon Chime API (application programming interface) is designed for developers to perform key tasks, such as creating and managing Amazon Chime accounts, users, and Voice Connectors. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes. It also includes some server-side API actions to use with the Amazon Chime SDK. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.

Using an AWS SDK

You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.

Using the AWS CLI

Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.

Using REST API

If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.

When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com.

Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Identity and Access Management for Amazon Chime in the Amazon Chime Administration Guide.

" + "documentation":"

The Amazon Chime API (application programming interface) is designed for developers to perform key tasks, such as creating and managing Amazon Chime accounts, users, and Voice Connectors. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes. It also includes some server-side API actions to use with the Amazon Chime SDK. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.

Using an AWS SDK

You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.

Using the AWS CLI

Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.

Using REST

If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports Signature Version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.

When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com.

Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Identity and Access Management for Amazon Chime in the Amazon Chime Administration Guide.
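For the SDK path described above, a minimal sketch of constructing a Chime client that resolves access keys from the default provider chain and signs requests with Signature Version 4 automatically. The region choice is an assumption; adjust it as needed:

```java
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.chime.ChimeClient;

public class ChimeClientSetupSketch {
    public static void main(String[] args) {
        // The SDK resolves access keys from the usual provider chain (environment,
        // system properties, shared credentials file, container or instance metadata)
        // and signs every request on your behalf.
        ChimeClient chime = ChimeClient.builder()
                .region(Region.US_EAST_1) // assumed region for the Amazon Chime API
                .credentialsProvider(DefaultCredentialsProvider.create())
                .build();

        System.out.println(chime.serviceName());
    }
}
```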

" } diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index 50467690bb07..2c71ea18cce5 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 cloud9 diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index 22f5a43b3d4a..f0b0b798c975 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 9b1571ee0aa2..c8ee7f1f00cc 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 48e7d3e601c3..95134db6aa07 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -55,7 +55,7 @@ {"shape":"InsufficientCapabilitiesException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that AWS CloudFormation will create. If you create a change set for an existing stack, AWS CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources AWS CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.

To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. To create a change set for an import operation, specify IMPORT for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.

When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't make changes until you execute the change set.

" + "documentation":"

Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that AWS CloudFormation will create. If you create a change set for an existing stack, AWS CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources AWS CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.

To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. To create a change set for an import operation, specify IMPORT for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, AWS CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.

When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. AWS CloudFormation doesn't make changes until you execute the change set.

To create a change set for the entire stack hierarchy, set IncludeNestedStacks to True.
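A minimal sketch of this workflow with the generated Java CloudFormation client: create a change set for an existing stack with IncludeNestedStacks enabled, review it, then execute it. Stack and change set names are placeholders, and in practice you would wait for the change set to reach CREATE_COMPLETE before executing:

```java
import software.amazon.awssdk.services.cloudformation.CloudFormationClient;
import software.amazon.awssdk.services.cloudformation.model.ChangeSetType;
import software.amazon.awssdk.services.cloudformation.model.CreateChangeSetRequest;
import software.amazon.awssdk.services.cloudformation.model.DescribeChangeSetRequest;
import software.amazon.awssdk.services.cloudformation.model.ExecuteChangeSetRequest;

public class NestedChangeSetSketch {
    public static void main(String[] args) {
        CloudFormationClient cfn = CloudFormationClient.create();

        // Create a change set for the whole stack hierarchy by turning on IncludeNestedStacks.
        cfn.createChangeSet(CreateChangeSetRequest.builder()
                .stackName("my-root-stack")          // placeholder stack name
                .changeSetName("nested-update")      // placeholder change set name
                .changeSetType(ChangeSetType.UPDATE) // UPDATE for an existing stack
                .usePreviousTemplate(true)           // reuse the stack's current template
                .includeNestedStacks(true)           // also create change sets for nested stacks
                .build());

        // Review the changes before executing; poll until Status is CREATE_COMPLETE.
        System.out.println(cfn.describeChangeSet(DescribeChangeSetRequest.builder()
                .stackName("my-root-stack")
                .changeSetName("nested-update")
                .build()));

        // CloudFormation makes no changes until the change set is executed.
        cfn.executeChangeSet(ExecuteChangeSetRequest.builder()
                .stackName("my-root-stack")
                .changeSetName("nested-update")
                .build());
    }
}
```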

" }, "CreateStack":{ "name":"CreateStack", @@ -129,7 +129,7 @@ "errors":[ {"shape":"InvalidChangeSetStatusException"} ], - "documentation":"

Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

If the call successfully completes, AWS CloudFormation successfully deleted the change set.

" + "documentation":"

Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

If the call successfully completes, AWS CloudFormation successfully deleted the change set.

If IncludeNestedStacks specifies True during the creation of the nested change set, then DeleteChangeSet will delete all change sets that belong to the stacks hierarchy and will also delete all change sets for nested stacks with the status of REVIEW_IN_PROGRESS.

" }, "DeleteStack":{ "name":"DeleteStack", @@ -464,7 +464,7 @@ {"shape":"InsufficientCapabilitiesException"}, {"shape":"TokenAlreadyExistsException"} ], - "documentation":"

Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.

" + "documentation":"

Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.

To create a change set for the entire stack hierarchy, IncludeNestedStacks must have been set to True.

" }, "GetStackPolicy":{ "name":"GetStackPolicy", @@ -1019,7 +1019,8 @@ "Add", "Modify", "Remove", - "Import" + "Import", + "Dynamic" ] }, "ChangeSetId":{ @@ -1057,7 +1058,10 @@ "CREATE_PENDING", "CREATE_IN_PROGRESS", "CREATE_COMPLETE", + "DELETE_PENDING", + "DELETE_IN_PROGRESS", "DELETE_COMPLETE", + "DELETE_FAILED", "FAILED" ] }, @@ -1104,6 +1108,18 @@ "Description":{ "shape":"Description", "documentation":"

Descriptive information about the change set.

" + }, + "IncludeNestedStacks":{ + "shape":"IncludeNestedStacks", + "documentation":"

Specifies the current setting of IncludeNestedStacks for the change set.

" + }, + "ParentChangeSetId":{ + "shape":"ChangeSetId", + "documentation":"

The parent change set ID.

" + }, + "RootChangeSetId":{ + "shape":"ChangeSetId", + "documentation":"

The root change set ID.

" } }, "documentation":"

The ChangeSetSummary structure describes a change set, its status, and the stack with which it's associated.

" @@ -1203,7 +1219,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    This capacity does not apply to creating change sets, and specifying it when creating change sets has no effect.

    Also, change sets do not currently support nested stacks. If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.

    For more information on macros, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

This capability does not apply to creating change sets, and specifying it when creating change sets has no effect.

If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specify this capability.

    For more information on macros, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -1244,6 +1260,10 @@ "ResourcesToImport":{ "shape":"ResourcesToImport", "documentation":"

The resources to import into your stack.

" + }, + "IncludeNestedStacks":{ + "shape":"IncludeNestedStacks", + "documentation":"

Creates a change set for all of the nested stacks specified in the template. The default behavior of this action is set to False. To include nested stacks in a change set, specify True.

" } }, "documentation":"

The input for the CreateChangeSet action.

" @@ -1300,7 +1320,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    Change sets do not currently support nested stacks. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

    You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.

    For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

    You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.

    For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -1591,7 +1611,7 @@ "documentation":"

The organization root ID or organizational unit (OU) IDs to which StackSets deploys.

" } }, - "documentation":"

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization master account, even if the master account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

" }, "DeprecatedStatus":{ "type":"string", @@ -1735,6 +1755,18 @@ "NextToken":{ "shape":"NextToken", "documentation":"

If the output exceeds 1 MB, a string that identifies the next page of changes. If there is no additional page, this value is null.

" + }, + "IncludeNestedStacks":{ + "shape":"IncludeNestedStacks", + "documentation":"

Indicates whether IncludeNestedStacks was set to True when the change set was created.

" + }, + "ParentChangeSetId":{ + "shape":"ChangeSetId", + "documentation":"

Specifies the change set ID of the parent change set in the current nested change set hierarchy.

" + }, + "RootChangeSetId":{ + "shape":"ChangeSetId", + "documentation":"

Specifies the change set ID of the root change set in the current nested change set hierarchy.
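
The new hierarchy fields can be read straight off a DescribeChangeSet response. A minimal sketch with the AWS SDK for Java v2 follows; the change set and stack names are hypothetical placeholders.

```java
import software.amazon.awssdk.services.cloudformation.CloudFormationClient;
import software.amazon.awssdk.services.cloudformation.model.DescribeChangeSetResponse;

public class InspectNestedChangeSet {
    public static void main(String[] args) {
        try (CloudFormationClient cfn = CloudFormationClient.create()) {
            DescribeChangeSetResponse cs = cfn.describeChangeSet(r -> r
                    .changeSetName("nested-stack-review")   // hypothetical change set name
                    .stackName("my-parent-stack"));         // hypothetical stack name

            // New fields added in this change.
            System.out.println("IncludeNestedStacks: " + cs.includeNestedStacks());
            System.out.println("Parent change set:   " + cs.parentChangeSetId());
            System.out.println("Root change set:     " + cs.rootChangeSetId());
        }
    }
}
```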

" } }, "documentation":"

The output for the DescribeChangeSet action.

" @@ -2496,6 +2528,7 @@ "type":"integer", "min":0 }, + "IncludeNestedStacks":{"type":"boolean"}, "InsufficientCapabilitiesException":{ "type":"structure", "members":{ @@ -2937,6 +2970,10 @@ "shape":"DeprecatedStatus", "documentation":"

The deprecation status of the types that you want to get summary information about.

Valid values include:

  • LIVE: The type is registered for use in CloudFormation operations.

  • DEPRECATED: The type has been deregistered and can no longer be used in CloudFormation operations.

" }, + "Type":{ + "shape":"RegistryType", + "documentation":"

The type of extension.

" + }, "MaxResults":{ "shape":"MaxResults", "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" @@ -2984,6 +3021,7 @@ }, "documentation":"

Contains logging configuration information for a type.

" }, + "LogicalIdHierarchy":{"type":"string"}, "LogicalResourceId":{"type":"string"}, "LogicalResourceIds":{ "type":"list", @@ -3006,6 +3044,20 @@ "min":1 }, "Metadata":{"type":"string"}, + "ModuleInfo":{ + "type":"structure", + "members":{ + "TypeHierarchy":{ + "shape":"TypeHierarchy", + "documentation":"

A concatenated list of the module type or types containing the resource. Module types are listed starting with the inner-most nested module, and separated by /.

In the following example, the resource was created from a module of type AWS::First::Example::MODULE, that is nested inside a parent module of type AWS::Second::Example::MODULE.

AWS::First::Example::MODULE/AWS::Second::Example::MODULE

" + }, + "LogicalIdHierarchy":{ + "shape":"LogicalIdHierarchy", + "documentation":"

A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /.

In the following example, the resource was created from a module, moduleA, that is nested inside a parent module, moduleB.

moduleA/moduleB

For more information, see Referencing resources in a module in the CloudFormation User Guide.

" + } + }, + "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

For more information on modules, see Using modules to encapsulate and reuse resource configurations in the CloudFormation User Guide.

" + }, "MonitoringTimeInMinutes":{ "type":"integer", "max":180, @@ -3361,7 +3413,7 @@ }, "SchemaHandlerPackage":{ "shape":"S3Url", - "documentation":"

A url to the S3 bucket containing the schema handler package that contains the schema, event handlers, and associated files for the type you want to register.

For information on generating a schema handler package for the type you want to register, see submit in the CloudFormation CLI User Guide.

As part of registering a resource provider type, CloudFormation must be able to access the S3 bucket which contains the schema handler package for that resource provider. For more information, see IAM Permissions for Registering a Resource Provider in the AWS CloudFormation User Guide.

" + "documentation":"

A URL to the S3 bucket containing the schema handler package that contains the schema, event handlers, and associated files for the type you want to register.

For information on generating a schema handler package for the type you want to register, see submit in the CloudFormation CLI User Guide.

The user registering the resource provider type must be able to access the schema handler package in the S3 bucket. That is, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the AWS Identity and Access Management User Guide.
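
For illustration, the sketch below registers a module type (the MODULE registry type added in this change) from a schema handler package in S3 using the AWS SDK for Java v2. The type name and S3 URL are hypothetical placeholders, and the builder methods are assumed to mirror the RegisterType input members (Type, TypeName, SchemaHandlerPackage).

```java
import software.amazon.awssdk.services.cloudformation.CloudFormationClient;
import software.amazon.awssdk.services.cloudformation.model.RegisterTypeResponse;
import software.amazon.awssdk.services.cloudformation.model.RegistryType;

public class RegisterModuleType {
    public static void main(String[] args) {
        try (CloudFormationClient cfn = CloudFormationClient.create()) {
            RegisterTypeResponse response = cfn.registerType(r -> r
                    .type(RegistryType.MODULE)
                    // Module type names carry the ::MODULE suffix allowed by the updated TypeName pattern.
                    .typeName("My::Example::Vpc::MODULE")                             // hypothetical type name
                    .schemaHandlerPackage("s3://my-registry-bucket/vpc-module.zip")); // hypothetical package URL
            System.out.println("Registration token: " + response.registrationToken());
        }
    }
}
```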

" }, "LoggingConfig":{ "shape":"LoggingConfig", @@ -3406,7 +3458,10 @@ }, "RegistryType":{ "type":"string", - "enum":["RESOURCE"] + "enum":[ + "RESOURCE", + "MODULE" + ] }, "Replacement":{ "type":"string", @@ -3446,7 +3501,7 @@ "members":{ "Action":{ "shape":"ChangeAction", - "documentation":"

The action that AWS CloudFormation takes on the resource, such as Add (adds a new resource), Modify (changes a resource), or Remove (deletes a resource).

" + "documentation":"

The action that AWS CloudFormation takes on the resource, such as Add (adds a new resource), Modify (changes a resource), Remove (deletes a resource), Import (imports a resource), or Dynamic (exact action for the resource cannot be determined).

" }, "LogicalResourceId":{ "shape":"LogicalResourceId", @@ -3471,6 +3526,14 @@ "Details":{ "shape":"ResourceChangeDetails", "documentation":"

For the Modify action, a list of ResourceChangeDetail structures that describes the changes that AWS CloudFormation will make to the resource.

" + }, + "ChangeSetId":{ + "shape":"ChangeSetId", + "documentation":"

The change set ID of the nested change set.

" + }, + "ModuleInfo":{ + "shape":"ModuleInfo", + "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, "documentation":"

The ResourceChange structure describes the resource and the action that AWS CloudFormation will perform on it if you execute this change set.

" @@ -4240,6 +4303,10 @@ "DriftInformation":{ "shape":"StackResourceDriftInformation", "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + }, + "ModuleInfo":{ + "shape":"ModuleInfo", + "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, "documentation":"

The StackResource data type.

" @@ -4296,6 +4363,10 @@ "DriftInformation":{ "shape":"StackResourceDriftInformation", "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + }, + "ModuleInfo":{ + "shape":"ModuleInfo", + "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, "documentation":"

Contains detailed information about the specified stack resource.

" @@ -4349,6 +4420,10 @@ "Timestamp":{ "shape":"Timestamp", "documentation":"

Time at which AWS CloudFormation performed drift detection on the stack resource.

" + }, + "ModuleInfo":{ + "shape":"ModuleInfo", + "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, "documentation":"

Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which AWS CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Resources that do not currently support drift detection cannot be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

" @@ -4442,6 +4517,10 @@ "DriftInformation":{ "shape":"StackResourceDriftInformationSummary", "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + }, + "ModuleInfo":{ + "shape":"ModuleInfo", + "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, "documentation":"

Contains high-level information about the specified stack resource.

" @@ -5071,11 +5150,12 @@ "max":1024, "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:([0-9]{12})?:type/.+" }, + "TypeHierarchy":{"type":"string"}, "TypeName":{ "type":"string", - "max":196, + "max":204, "min":10, - "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}(::MODULE){0,1}" }, "TypeNotFoundException":{ "type":"structure", @@ -5206,7 +5286,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to update the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    Change sets do not currently support nested stacks. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

    You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.

    For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to update the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your AWS account; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, AWS CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by AWS CloudFormation.

    If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

    You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified.

    For more information, see Using AWS CloudFormation Macros to Perform Custom Processing on Templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 4c36c08ef937..ecbe46c82f83 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudfront/src/main/resources/codegen-resources/service-2.json b/services/cloudfront/src/main/resources/codegen-resources/service-2.json index b5e8d203d442..a1e7dfe0b11c 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudfront/src/main/resources/codegen-resources/service-2.json @@ -109,7 +109,10 @@ {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, - {"shape":"TooManyDistributionsAssociatedToOriginRequestPolicy"} + {"shape":"TooManyDistributionsAssociatedToOriginRequestPolicy"}, + {"shape":"TooManyDistributionsAssociatedToKeyGroup"}, + {"shape":"TooManyKeyGroupsAssociatedToDistribution"}, + {"shape":"TrustedKeyGroupDoesNotExist"} ], "documentation":"

Creates a new web distribution. You create a CloudFront distribution to tell CloudFront where you want content to be delivered from, and the details about how to track and manage content delivery. Send a POST request to the /CloudFront API version/distribution/distribution ID resource.

When you update a distribution, there are more required fields than when you create a distribution. When you update your distribution by using UpdateDistribution, follow the steps included in the documentation to get the current configuration and then make your updates. This helps to make sure that you include all of the required fields. To view a summary, see Required Fields for Create Distribution and Update Distribution in the Amazon CloudFront Developer Guide.

" }, @@ -172,7 +175,10 @@ {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, - {"shape":"TooManyDistributionsAssociatedToOriginRequestPolicy"} + {"shape":"TooManyDistributionsAssociatedToOriginRequestPolicy"}, + {"shape":"TooManyDistributionsAssociatedToKeyGroup"}, + {"shape":"TooManyKeyGroupsAssociatedToDistribution"}, + {"shape":"TrustedKeyGroupDoesNotExist"} ], "documentation":"

Create a new distribution with tags.

" }, @@ -238,6 +244,23 @@ ], "documentation":"

Create a new invalidation.

" }, + "CreateKeyGroup":{ + "name":"CreateKeyGroup2020_05_31", + "http":{ + "method":"POST", + "requestUri":"/2020-05-31/key-group", + "responseCode":201 + }, + "input":{"shape":"CreateKeyGroupRequest"}, + "output":{"shape":"CreateKeyGroupResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"KeyGroupAlreadyExists"}, + {"shape":"TooManyKeyGroups"}, + {"shape":"TooManyPublicKeysInKeyGroup"} + ], + "documentation":"

Creates a key group that you can use with CloudFront signed URLs and signed cookies.

To create a key group, you must specify at least one public key for the key group. After you create a key group, you can reference it from one or more cache behaviors. When you reference a key group in a cache behavior, CloudFront requires signed URLs or signed cookies for all requests that match the cache behavior. The URLs or cookies must be signed with a private key whose corresponding public key is in the key group. The signed URL or cookie contains information about which public key CloudFront should use to verify the signature. For more information, see Serving private content in the Amazon CloudFront Developer Guide.
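
A minimal sketch of creating a key group with the AWS SDK for Java v2, assuming the referenced public keys were already uploaded with CreatePublicKey; the key group name, comment, and public key IDs are hypothetical.

```java
import software.amazon.awssdk.services.cloudfront.CloudFrontClient;
import software.amazon.awssdk.services.cloudfront.model.CreateKeyGroupResponse;

public class CreateKeyGroupSketch {
    public static void main(String[] args) {
        try (CloudFrontClient cloudFront = CloudFrontClient.create()) {
            CreateKeyGroupResponse response = cloudFront.createKeyGroup(r -> r
                    .keyGroupConfig(c -> c
                            .name("signed-url-key-group")                          // hypothetical name
                            .items("K2JCJMDEHXQW5F", "K1HFGXOMBB6PK5")             // hypothetical public key IDs
                            .comment("Public keys trusted to sign URLs and cookies")));
            System.out.println("Key group ID: " + response.keyGroup().id());
            System.out.println("ETag:         " + response.eTag());
        }
    }
}
```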

" + }, "CreateMonitoringSubscription":{ "name":"CreateMonitoringSubscription2020_05_31", "http":{ @@ -287,7 +310,7 @@ {"shape":"InvalidArgument"}, {"shape":"TooManyPublicKeys"} ], - "documentation":"

Add a new public key to CloudFront to use, for example, for field-level encryption. You can add a maximum of 10 public keys with one AWS account.

" + "documentation":"

Uploads a public key to CloudFront that you can use with signed URLs and signed cookies, or with field-level encryption.

" }, "CreateRealtimeLogConfig":{ "name":"CreateRealtimeLogConfig2020_05_31", @@ -305,7 +328,8 @@ "errors":[ {"shape":"RealtimeLogConfigAlreadyExists"}, {"shape":"TooManyRealtimeLogConfigs"}, - {"shape":"InvalidArgument"} + {"shape":"InvalidArgument"}, + {"shape":"AccessDenied"} ], "documentation":"

Creates a real-time log configuration.

After you create a real-time log configuration, you can attach it to one or more cache behaviors to send real-time log data to the specified Amazon Kinesis data stream.

For more information about real-time log configurations, see Real-time logs in the Amazon CloudFront Developer Guide.

" }, @@ -446,6 +470,22 @@ ], "documentation":"

Remove a field-level encryption profile.

" }, + "DeleteKeyGroup":{ + "name":"DeleteKeyGroup2020_05_31", + "http":{ + "method":"DELETE", + "requestUri":"/2020-05-31/key-group/{Id}", + "responseCode":204 + }, + "input":{"shape":"DeleteKeyGroupRequest"}, + "errors":[ + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchResource"}, + {"shape":"PreconditionFailed"}, + {"shape":"ResourceInUse"} + ], + "documentation":"

Deletes a key group.

You cannot delete a key group that is referenced in a cache behavior. First update your distributions to remove the key group from all cache behaviors, then delete the key group.

To delete a key group, you must provide the key group’s identifier and version. To get these values, use ListKeyGroups followed by GetKeyGroup or GetKeyGroupConfig.
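
A sketch of that sequence with the AWS SDK for Java v2: fetch the key group to obtain its ETag, then pass the identifier and ETag to DeleteKeyGroup. The key group ID is a hypothetical placeholder obtained from ListKeyGroups.

```java
import software.amazon.awssdk.services.cloudfront.CloudFrontClient;
import software.amazon.awssdk.services.cloudfront.model.GetKeyGroupResponse;

public class DeleteKeyGroupSketch {
    public static void main(String[] args) {
        try (CloudFrontClient cloudFront = CloudFrontClient.create()) {
            String keyGroupId = "example-key-group-id"; // hypothetical; obtain it from ListKeyGroups

            // Fetch the key group to get the ETag (version) that DeleteKeyGroup requires.
            GetKeyGroupResponse current = cloudFront.getKeyGroup(r -> r.id(keyGroupId));

            cloudFront.deleteKeyGroup(r -> r
                    .id(keyGroupId)
                    .ifMatch(current.eTag()));
        }
    }
}
```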

" + }, "DeleteMonitoringSubscription":{ "name":"DeleteMonitoringSubscription2020_05_31", "http":{ @@ -510,7 +550,8 @@ "errors":[ {"shape":"NoSuchRealtimeLogConfig"}, {"shape":"RealtimeLogConfigInUse"}, - {"shape":"InvalidArgument"} + {"shape":"InvalidArgument"}, + {"shape":"AccessDenied"} ], "documentation":"

Deletes a real-time log configuration.

You cannot delete a real-time log configuration if it’s attached to a cache behavior. First update your distributions to remove the real-time log configuration from all cache behaviors, then delete the real-time log configuration.

To delete a real-time log configuration, you can provide the configuration’s name or its Amazon Resource Name (ARN). You must provide at least one. If you provide both, CloudFront uses the name to identify the real-time log configuration to delete.

" }, @@ -686,6 +727,32 @@ ], "documentation":"

Get the information about an invalidation.

" }, + "GetKeyGroup":{ + "name":"GetKeyGroup2020_05_31", + "http":{ + "method":"GET", + "requestUri":"/2020-05-31/key-group/{Id}" + }, + "input":{"shape":"GetKeyGroupRequest"}, + "output":{"shape":"GetKeyGroupResult"}, + "errors":[ + {"shape":"NoSuchResource"} + ], + "documentation":"

Gets a key group, including the date and time when the key group was last modified.

To get a key group, you must provide the key group’s identifier. If the key group is referenced in a distribution’s cache behavior, you can get the key group’s identifier using ListDistributions or GetDistribution. If the key group is not referenced in a cache behavior, you can get the identifier using ListKeyGroups.

" + }, + "GetKeyGroupConfig":{ + "name":"GetKeyGroupConfig2020_05_31", + "http":{ + "method":"GET", + "requestUri":"/2020-05-31/key-group/{Id}/config" + }, + "input":{"shape":"GetKeyGroupConfigRequest"}, + "output":{"shape":"GetKeyGroupConfigResult"}, + "errors":[ + {"shape":"NoSuchResource"} + ], + "documentation":"

Gets a key group configuration.

To get a key group configuration, you must provide the key group’s identifier. If the key group is referenced in a distribution’s cache behavior, you can get the key group’s identifier using ListDistributions or GetDistribution. If the key group is not referenced in a cache behavior, you can get the identifier using ListKeyGroups.

" + }, "GetMonitoringSubscription":{ "name":"GetMonitoringSubscription2020_05_31", "http":{ @@ -740,7 +807,7 @@ {"shape":"AccessDenied"}, {"shape":"NoSuchPublicKey"} ], - "documentation":"

Get the public key information.

" + "documentation":"

Gets a public key.

" }, "GetPublicKeyConfig":{ "name":"GetPublicKeyConfig2020_05_31", @@ -754,7 +821,7 @@ {"shape":"AccessDenied"}, {"shape":"NoSuchPublicKey"} ], - "documentation":"

Return public key configuration informaation

" + "documentation":"

Gets a public key configuration.

" }, "GetRealtimeLogConfig":{ "name":"GetRealtimeLogConfig2020_05_31", @@ -770,7 +837,8 @@ "output":{"shape":"GetRealtimeLogConfigResult"}, "errors":[ {"shape":"NoSuchRealtimeLogConfig"}, - {"shape":"InvalidArgument"} + {"shape":"InvalidArgument"}, + {"shape":"AccessDenied"} ], "documentation":"

Gets a real-time log configuration.

To get a real-time log configuration, you can provide the configuration’s name or its Amazon Resource Name (ARN). You must provide at least one. If you provide both, CloudFront uses the name to identify the real-time log configuration to get.

" }, @@ -858,6 +926,20 @@ ], "documentation":"

Gets a list of distribution IDs for distributions that have a cache behavior that’s associated with the specified cache policy.

You can optionally specify the maximum number of items to receive in the response. If the total number of items in the list exceeds the maximum that you specify, or the default maximum, the response is paginated. To get the next page of items, send a subsequent request that specifies the NextMarker value from the current response as the Marker value in the subsequent request.

" }, + "ListDistributionsByKeyGroup":{ + "name":"ListDistributionsByKeyGroup2020_05_31", + "http":{ + "method":"GET", + "requestUri":"/2020-05-31/distributionsByKeyGroupId/{KeyGroupId}" + }, + "input":{"shape":"ListDistributionsByKeyGroupRequest"}, + "output":{"shape":"ListDistributionsByKeyGroupResult"}, + "errors":[ + {"shape":"NoSuchResource"}, + {"shape":"InvalidArgument"} + ], + "documentation":"

Gets a list of distribution IDs for distributions that have a cache behavior that references the specified key group.

You can optionally specify the maximum number of items to receive in the response. If the total number of items in the list exceeds the maximum that you specify, or the default maximum, the response is paginated. To get the next page of items, send a subsequent request that specifies the NextMarker value from the current response as the Marker value in the subsequent request.

" + }, "ListDistributionsByOriginRequestPolicyId":{ "name":"ListDistributionsByOriginRequestPolicyId2020_05_31", "http":{ @@ -945,6 +1027,19 @@ ], "documentation":"

Lists invalidation batches.

" }, + "ListKeyGroups":{ + "name":"ListKeyGroups2020_05_31", + "http":{ + "method":"GET", + "requestUri":"/2020-05-31/key-group" + }, + "input":{"shape":"ListKeyGroupsRequest"}, + "output":{"shape":"ListKeyGroupsResult"}, + "errors":[ + {"shape":"InvalidArgument"} + ], + "documentation":"

Gets a list of key groups.

You can optionally specify the maximum number of items to receive in the response. If the total number of items in the list exceeds the maximum that you specify, or the default maximum, the response is paginated. To get the next page of items, send a subsequent request that specifies the NextMarker value from the current response as the Marker value in the subsequent request.
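
The same Marker/NextMarker pattern applies to the other list operations in this change; the sketch below shows it for ListKeyGroups with the AWS SDK for Java v2. MaxItems is modeled as a string, so it is passed as "10" here.

```java
import software.amazon.awssdk.services.cloudfront.CloudFrontClient;
import software.amazon.awssdk.services.cloudfront.model.KeyGroupList;
import software.amazon.awssdk.services.cloudfront.model.KeyGroupSummary;

public class ListAllKeyGroups {
    public static void main(String[] args) {
        try (CloudFrontClient cloudFront = CloudFrontClient.create()) {
            String marker = null;
            do {
                String currentMarker = marker; // effectively final copy for the lambda
                KeyGroupList page = cloudFront
                        .listKeyGroups(r -> r.maxItems("10").marker(currentMarker))
                        .keyGroupList();
                for (KeyGroupSummary summary : page.items()) {
                    System.out.println(summary.keyGroup().id() + "  "
                            + summary.keyGroup().keyGroupConfig().name());
                }
                marker = page.nextMarker(); // null when there are no more pages
            } while (marker != null);
        }
    }
}
```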

" + }, "ListOriginRequestPolicies":{ "name":"ListOriginRequestPolicies2020_05_31", "http":{ @@ -981,6 +1076,11 @@ }, "input":{"shape":"ListRealtimeLogConfigsRequest"}, "output":{"shape":"ListRealtimeLogConfigsResult"}, + "errors":[ + {"shape":"InvalidArgument"}, + {"shape":"AccessDenied"}, + {"shape":"NoSuchRealtimeLogConfig"} + ], "documentation":"

Gets a list of real-time log configurations.

You can optionally specify the maximum number of items to receive in the response. If the total number of items in the list exceeds the maximum that you specify, or the default maximum, the response is paginated. To get the next page of items, send a subsequent request that specifies the NextMarker value from the current response as the Marker value in the subsequent request.

" }, "ListStreamingDistributions":{ @@ -1144,7 +1244,10 @@ {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, - {"shape":"TooManyDistributionsAssociatedToOriginRequestPolicy"} + {"shape":"TooManyDistributionsAssociatedToOriginRequestPolicy"}, + {"shape":"TooManyDistributionsAssociatedToKeyGroup"}, + {"shape":"TooManyKeyGroupsAssociatedToDistribution"}, + {"shape":"TrustedKeyGroupDoesNotExist"} ], "documentation":"

Updates the configuration for a web distribution.

When you update a distribution, there are more required fields than when you create a distribution. When you update your distribution by using this API action, follow the steps here to get the current configuration and then make your updates, to make sure that you include all of the required fields. To view a summary, see Required Fields for Create Distribution and Update Distribution in the Amazon CloudFront Developer Guide.

The update process includes getting the current distribution configuration, updating the XML document that is returned to make your changes, and then submitting an UpdateDistribution request to make the updates.

For information about updating a distribution using the CloudFront console instead, see Creating a Distribution in the Amazon CloudFront Developer Guide.

To update a web distribution using the CloudFront API

  1. Submit a GetDistributionConfig request to get the current configuration and an Etag header for the distribution.

    If you update the distribution again, you must get a new Etag header.

  2. Update the XML document that was returned in the response to your GetDistributionConfig request to include your changes.

    When you edit the XML file, be aware of the following:

    • You must strip out the ETag parameter that is returned.

    • Additional fields are required when you update a distribution. There may be fields included in the XML file for features that you haven't configured for your distribution. This is expected and required to successfully update the distribution.

    • You can't change the value of CallerReference. If you try to change this value, CloudFront returns an IllegalUpdate error.

    • The new configuration replaces the existing configuration; the values that you specify in an UpdateDistribution request are not merged into your existing configuration. When you add, delete, or replace values in an element that allows multiple values (for example, CNAME), you must specify all of the values that you want to appear in the updated distribution. In addition, you must update the corresponding Quantity element.

  3. Submit an UpdateDistribution request to update the configuration for your distribution:

    • In the request body, include the XML document that you updated in Step 2. The request body must include an XML document with a DistributionConfig element.

    • Set the value of the HTTP If-Match header to the value of the ETag header that CloudFront returned when you submitted the GetDistributionConfig request in Step 1.

  4. Review the response to the UpdateDistribution request to confirm that the configuration was successfully updated.

  5. Optional: Submit a GetDistribution request to confirm that your changes have propagated. When propagation is complete, the value of Status is Deployed.
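
The same workflow, sketched with the SDK's model objects instead of raw XML (the SDK handles marshalling, so there is no ETag element to strip from the configuration). The distribution ID and the Comment change are hypothetical placeholders.

```java
import software.amazon.awssdk.services.cloudfront.CloudFrontClient;
import software.amazon.awssdk.services.cloudfront.model.DistributionConfig;
import software.amazon.awssdk.services.cloudfront.model.GetDistributionConfigResponse;

public class UpdateDistributionSketch {
    public static void main(String[] args) {
        try (CloudFrontClient cloudFront = CloudFrontClient.create()) {
            String distributionId = "EDFDVBD6EXAMPLE"; // hypothetical distribution ID

            // Step 1: get the current configuration and its ETag.
            GetDistributionConfigResponse current =
                    cloudFront.getDistributionConfig(r -> r.id(distributionId));

            // Step 2: modify the configuration locally. The update replaces the whole
            // configuration, so toBuilder() carries over every field you don't change.
            DistributionConfig updated = current.distributionConfig().toBuilder()
                    .comment("Updated via the SDK") // example change only
                    .build();

            // Step 3: submit the update, passing the ETag from step 1 as If-Match.
            cloudFront.updateDistribution(r -> r
                    .id(distributionId)
                    .ifMatch(current.eTag())
                    .distributionConfig(updated));
        }
    }
}
```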

" }, @@ -1195,6 +1298,24 @@ ], "documentation":"

Update a field-level encryption profile.

" }, + "UpdateKeyGroup":{ + "name":"UpdateKeyGroup2020_05_31", + "http":{ + "method":"PUT", + "requestUri":"/2020-05-31/key-group/{Id}" + }, + "input":{"shape":"UpdateKeyGroupRequest"}, + "output":{"shape":"UpdateKeyGroupResult"}, + "errors":[ + {"shape":"InvalidIfMatchVersion"}, + {"shape":"NoSuchResource"}, + {"shape":"PreconditionFailed"}, + {"shape":"KeyGroupAlreadyExists"}, + {"shape":"InvalidArgument"}, + {"shape":"TooManyPublicKeysInKeyGroup"} + ], + "documentation":"

Updates a key group.

When you update a key group, all the fields are updated with the values provided in the request. You cannot update some fields independently of the others. To update a key group:

  1. Get the current key group with GetKeyGroup or GetKeyGroupConfig.

  2. Locally modify the fields in the key group that you want to update. For example, add or remove public key IDs.

  3. Call UpdateKeyGroup with the entire key group object, including the fields that you modified and those that you didn’t.
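
A sketch of those three steps with the AWS SDK for Java v2. The key group ID and the added public key ID are hypothetical, and the UpdateKeyGroup request members used here (id, ifMatch, keyGroupConfig) are assumed to follow the usual CloudFront update pattern.

```java
import java.util.ArrayList;
import java.util.List;

import software.amazon.awssdk.services.cloudfront.CloudFrontClient;
import software.amazon.awssdk.services.cloudfront.model.GetKeyGroupConfigResponse;
import software.amazon.awssdk.services.cloudfront.model.KeyGroupConfig;

public class UpdateKeyGroupSketch {
    public static void main(String[] args) {
        try (CloudFrontClient cloudFront = CloudFrontClient.create()) {
            String keyGroupId = "example-key-group-id"; // hypothetical key group ID

            // 1. Get the current configuration and its ETag.
            GetKeyGroupConfigResponse current = cloudFront.getKeyGroupConfig(r -> r.id(keyGroupId));

            // 2. Modify the configuration locally, e.g. add another public key ID.
            List<String> publicKeyIds = new ArrayList<>(current.keyGroupConfig().items());
            publicKeyIds.add("K1HFGXOMBB6PK5"); // hypothetical public key ID
            KeyGroupConfig updated = current.keyGroupConfig().toBuilder()
                    .items(publicKeyIds)
                    .build();

            // 3. Send back the entire configuration, including the fields you didn't change.
            cloudFront.updateKeyGroup(r -> r
                    .id(keyGroupId)
                    .ifMatch(current.eTag())
                    .keyGroupConfig(updated));
        }
    }
}
```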

" + }, "UpdateOriginRequestPolicy":{ "name":"UpdateOriginRequestPolicy2020_05_31", "http":{ @@ -1251,7 +1372,8 @@ "output":{"shape":"UpdateRealtimeLogConfigResult"}, "errors":[ {"shape":"NoSuchRealtimeLogConfig"}, - {"shape":"InvalidArgument"} + {"shape":"InvalidArgument"}, + {"shape":"AccessDenied"} ], "documentation":"

Updates a real-time log configuration.

When you update a real-time log configuration, all the parameters are updated with the values provided in the request. You cannot update some parameters independently of the others. To update a real-time log configuration:

  1. Call GetRealtimeLogConfig to get the current real-time log configuration.

  2. Locally modify the parameters in the real-time log configuration that you want to update.

  3. Call this API (UpdateRealtimeLogConfig) by providing the entire real-time log configuration, including the parameters that you modified and those that you didn’t.

You cannot update a real-time log configuration’s Name or ARN.

" }, @@ -1291,6 +1413,28 @@ "error":{"httpStatusCode":403}, "exception":true }, + "ActiveTrustedKeyGroups":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{ + "shape":"boolean", + "documentation":"

This field is true if any of the key groups have public keys that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of key groups in the list.

" + }, + "Items":{ + "shape":"KGKeyPairIdsList", + "documentation":"

A list of key groups, including the identifiers of the public keys in each key group that CloudFront can use to verify the signatures of signed URLs and signed cookies.

" + } + }, + "documentation":"

A list of key groups, and the public keys in each key group, that CloudFront can use to verify the signatures of signed URLs and signed cookies.

" + }, "ActiveTrustedSigners":{ "type":"structure", "required":[ @@ -1300,18 +1444,18 @@ "members":{ "Enabled":{ "shape":"boolean", - "documentation":"

Enabled is true if any of the AWS accounts listed in the TrustedSigners complex type for this distribution have active CloudFront key pairs. If not, Enabled is false.

" + "documentation":"

This field is true if any of the AWS accounts in the list have active CloudFront key pairs that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.

" }, "Quantity":{ "shape":"integer", - "documentation":"

The number of trusted signers specified in the TrustedSigners complex type.

" + "documentation":"

The number of AWS accounts in the list.

" }, "Items":{ "shape":"SignerList", - "documentation":"

A complex type that contains one Signer complex type for each trusted signer that is specified in the TrustedSigners complex type.

" + "documentation":"

A list of AWS accounts and the identifiers of active CloudFront key pairs in each account that CloudFront can use to verify the signatures of signed URLs and signed cookies.

" } }, - "documentation":"

A complex type that lists the AWS accounts, if any, that you included in the TrustedSigners complex type for this distribution. These are the accounts that you want to allow to create signed URLs for private content.

The Signer complex type lists the AWS account number of the trusted signer or self if the signer is the AWS account that created the distribution. The Signer element also includes the IDs of any active CloudFront key pairs that are associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create signed URLs.

For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

" + "documentation":"

A list of AWS accounts and the active CloudFront key pairs in each account that CloudFront can use to verify the signatures of signed URLs and signed cookies.

" }, "AliasICPRecordal":{ "type":"structure", @@ -1405,7 +1549,6 @@ "required":[ "PathPattern", "TargetOriginId", - "TrustedSigners", "ViewerProtocolPolicy" ], "members":{ @@ -1419,7 +1562,11 @@ }, "TrustedSigners":{ "shape":"TrustedSigners", - "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content with Signed URLs and Signed Cookies in the Amazon CloudFront Developer Guide.

If you don’t want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it’s currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + "documentation":"

We recommend using TrustedKeyGroups instead of TrustedSigners.

A list of AWS account IDs whose public keys CloudFront can use to validate signed URLs or signed cookies.

When a cache behavior contains trusted signers, CloudFront requires signed URLs or signed cookies for all requests that match the cache behavior. The URLs or cookies must be signed with the private key of a CloudFront key pair in the trusted signer’s AWS account. The signed URL or cookie contains information about which public key CloudFront should use to verify the signature. For more information, see Serving private content in the Amazon CloudFront Developer Guide.

" + }, + "TrustedKeyGroups":{ + "shape":"TrustedKeyGroups", + "documentation":"

A list of key groups that CloudFront can use to validate signed URLs or signed cookies.

When a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed cookies for all requests that match the cache behavior. The URLs or cookies must be signed with a private key whose corresponding public key is in the key group. The signed URL or cookie contains information about which public key CloudFront should use to verify the signature. For more information, see Serving private content in the Amazon CloudFront Developer Guide.

" }, "ViewerProtocolPolicy":{ "shape":"ViewerProtocolPolicy", @@ -2211,6 +2358,41 @@ "documentation":"

The returned result of the corresponding request.

", "payload":"Invalidation" }, + "CreateKeyGroupRequest":{ + "type":"structure", + "required":["KeyGroupConfig"], + "members":{ + "KeyGroupConfig":{ + "shape":"KeyGroupConfig", + "documentation":"

A key group configuration.

", + "locationName":"KeyGroupConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2020-05-31/"} + } + }, + "payload":"KeyGroupConfig" + }, + "CreateKeyGroupResult":{ + "type":"structure", + "members":{ + "KeyGroup":{ + "shape":"KeyGroup", + "documentation":"

The key group that was just created.

" + }, + "Location":{ + "shape":"string", + "documentation":"

The URL of the key group.

", + "location":"header", + "locationName":"Location" + }, + "ETag":{ + "shape":"string", + "documentation":"

The identifier for this version of the key group.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"KeyGroup" + }, "CreateMonitoringSubscriptionRequest":{ "type":"structure", "required":[ @@ -2284,7 +2466,7 @@ "members":{ "PublicKeyConfig":{ "shape":"PublicKeyConfig", - "documentation":"

The request to add a public key to CloudFront.

", + "documentation":"

A CloudFront public key configuration.

", "locationName":"PublicKeyConfig", "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2020-05-31/"} } @@ -2296,17 +2478,17 @@ "members":{ "PublicKey":{ "shape":"PublicKey", - "documentation":"

Returned when you add a public key.

" + "documentation":"

The public key.

" }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new public key resource just created.

", + "documentation":"

The URL of the public key.

", "location":"header", "locationName":"Location" }, "ETag":{ "shape":"string", - "documentation":"

The current version of the public key. For example: E2QWRUHAPOMQZL.

", + "documentation":"

The identifier for this version of the public key.

", "location":"header", "locationName":"ETag" } @@ -2522,7 +2704,6 @@ "type":"structure", "required":[ "TargetOriginId", - "TrustedSigners", "ViewerProtocolPolicy" ], "members":{ @@ -2532,7 +2713,11 @@ }, "TrustedSigners":{ "shape":"TrustedSigners", - "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content with Signed URLs and Signed Cookies in the Amazon CloudFront Developer Guide.

If you don’t want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it’s currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + "documentation":"

We recommend using TrustedKeyGroups instead of TrustedSigners.

A list of AWS account IDs whose public keys CloudFront can use to validate signed URLs or signed cookies.

When a cache behavior contains trusted signers, CloudFront requires signed URLs or signed cookies for all requests that match the cache behavior. The URLs or cookies must be signed with the private key of a CloudFront key pair in a trusted signer’s AWS account. The signed URL or cookie contains information about which public key CloudFront should use to verify the signature. For more information, see Serving private content in the Amazon CloudFront Developer Guide.

" + }, + "TrustedKeyGroups":{ + "shape":"TrustedKeyGroups", + "documentation":"

A list of key groups that CloudFront can use to validate signed URLs or signed cookies.

When a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed cookies for all requests that match the cache behavior. The URLs or cookies must be signed with a private key whose corresponding public key is in the key group. The signed URL or cookie contains information about which public key CloudFront should use to verify the signature. For more information, see Serving private content in the Amazon CloudFront Developer Guide.

" }, "ViewerProtocolPolicy":{ "shape":"ViewerProtocolPolicy", @@ -2682,6 +2867,24 @@ } } }, + "DeleteKeyGroupRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier of the key group that you are deleting. To get the identifier, use ListKeyGroups.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The version of the key group that you are deleting. The version is the key group’s ETag value. To get the ETag, use GetKeyGroup or GetKeyGroupConfig.

", + "location":"header", + "locationName":"If-Match" + } + } + }, "DeleteMonitoringSubscriptionRequest":{ "type":"structure", "required":["DistributionId"], @@ -2776,7 +2979,6 @@ "LastModifiedTime", "InProgressInvalidationBatches", "DomainName", - "ActiveTrustedSigners", "DistributionConfig" ], "members":{ @@ -2806,7 +3008,11 @@ }, "ActiveTrustedSigners":{ "shape":"ActiveTrustedSigners", - "documentation":"

CloudFront automatically adds this element to the response only if you've set up the distribution to serve private content with signed URLs. The element lists the key pair IDs that CloudFront is aware of for each trusted signer. The Signer child element lists the AWS account number of the trusted signer (or an empty Self element if the signer is you). The Signer element also includes the IDs of any active key pairs associated with the trusted signer's AWS account. If no KeyPairId element appears for a Signer, that signer can't create working signed URLs.

" + "documentation":"

We recommend using TrustedKeyGroups instead of TrustedSigners.

CloudFront automatically adds this field to the response if you’ve configured a cache behavior in this distribution to serve private content using trusted signers. This field contains a list of AWS account IDs and the active CloudFront key pairs in each account that CloudFront can use to verify the signatures of signed URLs or signed cookies.

" + }, + "ActiveTrustedKeyGroups":{ + "shape":"ActiveTrustedKeyGroups", + "documentation":"

CloudFront automatically adds this field to the response if you’ve configured a cache behavior in this distribution to serve private content using key groups. This field contains a list of key groups and the public keys in each key group that CloudFront can use to verify the signatures of signed URLs or signed cookies.

" }, "DistributionConfig":{ "shape":"DistributionConfig", @@ -3880,6 +4086,62 @@ "documentation":"

The returned result of the corresponding request.

", "payload":"Invalidation" }, + "GetKeyGroupConfigRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier of the key group whose configuration you are getting. To get the identifier, use ListKeyGroups.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetKeyGroupConfigResult":{ + "type":"structure", + "members":{ + "KeyGroupConfig":{ + "shape":"KeyGroupConfig", + "documentation":"

The key group configuration.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The identifier for this version of the key group.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"KeyGroupConfig" + }, + "GetKeyGroupRequest":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier of the key group that you are getting. To get the identifier, use ListKeyGroups.

", + "location":"uri", + "locationName":"Id" + } + } + }, + "GetKeyGroupResult":{ + "type":"structure", + "members":{ + "KeyGroup":{ + "shape":"KeyGroup", + "documentation":"

The key group.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The identifier for this version of the key group.

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"KeyGroup" + }, "GetMonitoringSubscriptionRequest":{ "type":"structure", "required":["DistributionId"], @@ -3964,7 +4226,7 @@ "members":{ "Id":{ "shape":"string", - "documentation":"

Request the ID for the public key configuration.

", + "documentation":"

The identifier of the public key whose configuration you are getting.

", "location":"uri", "locationName":"Id" } @@ -3975,11 +4237,11 @@ "members":{ "PublicKeyConfig":{ "shape":"PublicKeyConfig", - "documentation":"

Return the result for the public key configuration.

" + "documentation":"

A public key configuration.

" }, "ETag":{ "shape":"string", - "documentation":"

The current version of the public key configuration. For example: E2QWRUHAPOMQZL.

", + "documentation":"

The identifier for this version of the public key configuration.

", "location":"header", "locationName":"ETag" } @@ -3992,7 +4254,7 @@ "members":{ "Id":{ "shape":"string", - "documentation":"

Request the ID for the public key.

", + "documentation":"

The identifier of the public key you are getting.

", "location":"uri", "locationName":"Id" } @@ -4003,11 +4265,11 @@ "members":{ "PublicKey":{ "shape":"PublicKey", - "documentation":"

Return the public key.

" + "documentation":"

The public key.

" }, "ETag":{ "shape":"string", - "documentation":"

The current version of the public key. For example: E2QWRUHAPOMQZL.

", + "documentation":"

The identifier for this version of the public key.

", "location":"header", "locationName":"ETag" } @@ -4496,6 +4758,122 @@ "all" ] }, + "KGKeyPairIds":{ + "type":"structure", + "members":{ + "KeyGroupId":{ + "shape":"string", + "documentation":"

The identifier of the key group that contains the public keys.

" + }, + "KeyPairIds":{"shape":"KeyPairIds"} + }, + "documentation":"

A list of identifiers for the public keys that CloudFront can use to verify the signatures of signed URLs and signed cookies.

" + }, + "KGKeyPairIdsList":{ + "type":"list", + "member":{ + "shape":"KGKeyPairIds", + "locationName":"KeyGroup" + } + }, + "KeyGroup":{ + "type":"structure", + "required":[ + "Id", + "LastModifiedTime", + "KeyGroupConfig" + ], + "members":{ + "Id":{ + "shape":"string", + "documentation":"

The identifier for the key group.

" + }, + "LastModifiedTime":{ + "shape":"timestamp", + "documentation":"

The date and time when the key group was last modified.

" + }, + "KeyGroupConfig":{ + "shape":"KeyGroupConfig", + "documentation":"

The key group configuration.

" + } + }, + "documentation":"

A key group.

A key group contains a list of public keys that you can use with CloudFront signed URLs and signed cookies.

" + }, + "KeyGroupAlreadyExists":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

A key group with this name already exists. You must provide a unique name. To modify an existing key group, use UpdateKeyGroup.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "KeyGroupConfig":{ + "type":"structure", + "required":[ + "Name", + "Items" + ], + "members":{ + "Name":{ + "shape":"string", + "documentation":"

A name to identify the key group.

" + }, + "Items":{ + "shape":"PublicKeyIdList", + "documentation":"

A list of the identifiers of the public keys in the key group.

" + }, + "Comment":{ + "shape":"string", + "documentation":"

A comment to describe the key group.

" + } + }, + "documentation":"

A key group configuration.

A key group contains a list of public keys that you can use with CloudFront signed URLs and signed cookies.

" + }, + "KeyGroupList":{ + "type":"structure", + "required":[ + "MaxItems", + "Quantity" + ], + "members":{ + "NextMarker":{ + "shape":"string", + "documentation":"

If there are more items in the list than are in this response, this element is present. It contains the value that you should use in the Marker field of a subsequent request to continue listing key groups.

" + }, + "MaxItems":{ + "shape":"integer", + "documentation":"

The maximum number of key groups requested.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of key groups returned in the response.

" + }, + "Items":{ + "shape":"KeyGroupSummaryList", + "documentation":"

A list of key groups.

" + } + }, + "documentation":"

A list of key groups.

" + }, + "KeyGroupSummary":{ + "type":"structure", + "required":["KeyGroup"], + "members":{ + "KeyGroup":{ + "shape":"KeyGroup", + "documentation":"

A key group.

" + } + }, + "documentation":"

Contains information about a key group.

" + }, + "KeyGroupSummaryList":{ + "type":"list", + "member":{ + "shape":"KeyGroupSummary", + "locationName":"KeyGroupSummary" + } + }, "KeyPairIdList":{ "type":"list", "member":{ @@ -4509,14 +4887,14 @@ "members":{ "Quantity":{ "shape":"integer", - "documentation":"

The number of active CloudFront key pairs for AwsAccountNumber.

For more information, see ActiveTrustedSigners.

" + "documentation":"

The number of key pair identifiers in the list.

" }, "Items":{ "shape":"KeyPairIdList", - "documentation":"

A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.

For more information, see ActiveTrustedSigners.

" + "documentation":"

A list of CloudFront key pair identifiers.

" } }, - "documentation":"

A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.

For more information, see ActiveTrustedSigners.

" + "documentation":"

A list of CloudFront key pair identifiers.

" }, "KinesisStreamConfig":{ "type":"structure", @@ -4677,6 +5055,37 @@ }, "payload":"DistributionIdList" }, + "ListDistributionsByKeyGroupRequest":{ + "type":"structure", + "required":["KeyGroupId"], + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this field when paginating results to indicate where to begin in your list of distribution IDs. The response includes distribution IDs in the list that occur after the marker. To get the next page of the list, set this field’s value to the value of NextMarker from the current page’s response.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of distribution IDs that you want in the response.

", + "location":"querystring", + "locationName":"MaxItems" + }, + "KeyGroupId":{ + "shape":"string", + "documentation":"

The ID of the key group whose associated distribution IDs you are listing.
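
A sketch of calling this operation once a client is generated from this model; the key group ID and MaxItems value are hypothetical, and the client and model class names are assumed to follow the SDK for Java v2's standard code generation.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.cloudfront.CloudFrontClient;
import software.amazon.awssdk.services.cloudfront.model.ListDistributionsByKeyGroupRequest;
import software.amazon.awssdk.services.cloudfront.model.ListDistributionsByKeyGroupResponse;

CloudFrontClient cloudFront = CloudFrontClient.builder()
        .region(Region.AWS_GLOBAL)              // CloudFront is a global service
        .build();

ListDistributionsByKeyGroupResponse response = cloudFront.listDistributionsByKeyGroup(
        ListDistributionsByKeyGroupRequest.builder()
                .keyGroupId("K3EXAMPLE33333")   // hypothetical key group ID
                .maxItems("25")                 // MaxItems is a string in this model
                .build());

// The DistributionIdList payload carries this page of distribution IDs and, when more
// remain, a NextMarker value to pass as Marker on the next request.
System.out.println(response.distributionIdList());
```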

", + "location":"uri", + "locationName":"KeyGroupId" + } + } + }, + "ListDistributionsByKeyGroupResult":{ + "type":"structure", + "members":{ + "DistributionIdList":{"shape":"DistributionIdList"} + }, + "payload":"DistributionIdList" + }, "ListDistributionsByOriginRequestPolicyIdRequest":{ "type":"structure", "required":["OriginRequestPolicyId"], @@ -4894,6 +5303,33 @@ "documentation":"

The returned result of the corresponding request.

", "payload":"InvalidationList" }, + "ListKeyGroupsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"string", + "documentation":"

Use this field when paginating results to indicate where to begin in your list of key groups. The response includes key groups in the list that occur after the marker. To get the next page of the list, set this field’s value to the value of NextMarker from the current page’s response.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"string", + "documentation":"

The maximum number of key groups that you want in the response.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListKeyGroupsResult":{ + "type":"structure", + "members":{ + "KeyGroupList":{ + "shape":"KeyGroupList", + "documentation":"

A list of key groups.
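
The Marker/NextMarker pagination described above translates into a simple loop against the generated client. A sketch, reusing the cloudFront client from the previous example (the MaxItems value is arbitrary):

```java
import software.amazon.awssdk.services.cloudfront.model.KeyGroupList;
import software.amazon.awssdk.services.cloudfront.model.ListKeyGroupsRequest;

String marker = null;
do {
    KeyGroupList page = cloudFront.listKeyGroups(ListKeyGroupsRequest.builder()
                    .marker(marker)             // null on the first request
                    .maxItems("10")
                    .build())
            .keyGroupList();

    page.items().forEach(summary -> System.out.println(summary.keyGroup()));

    marker = page.nextMarker();                 // present only when more key groups remain
} while (marker != null);
```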

" + } + }, + "payload":"KeyGroupList" + }, "ListOriginRequestPoliciesRequest":{ "type":"structure", "members":{ @@ -5248,15 +5684,15 @@ }, "CustomHeaders":{ "shape":"CustomHeaders", - "documentation":"

A list of HTTP header names and values that CloudFront adds to requests it sends to the origin.

For more information, see Adding Custom Headers to Origin Requests in the Amazon CloudFront Developer Guide.

" + "documentation":"

A list of HTTP header names and values that CloudFront adds to the requests that it sends to the origin.

For more information, see Adding Custom Headers to Origin Requests in the Amazon CloudFront Developer Guide.

" }, "S3OriginConfig":{ "shape":"S3OriginConfig", - "documentation":"

Use this type to specify an origin that is an Amazon S3 bucket that is not configured with static website hosting. To specify any other type of origin, including an Amazon S3 bucket that is configured with static website hosting, use the CustomOriginConfig type instead.

" + "documentation":"

Use this type to specify an origin that is an Amazon S3 bucket that is not configured with static website hosting. To specify any other type of origin, including an Amazon S3 bucket that is configured with static website hosting, use the CustomOriginConfig type instead.

" }, "CustomOriginConfig":{ "shape":"CustomOriginConfig", - "documentation":"

Use this type to specify an origin that is a content container or HTTP server, including an Amazon S3 bucket that is configured with static website hosting. To specify an Amazon S3 bucket that is not configured with static website hosting, use the S3OriginConfig type instead.

" + "documentation":"

Use this type to specify an origin that is not an Amazon S3 bucket, with one exception. If the Amazon S3 bucket is configured with static website hosting, use this type. If the Amazon S3 bucket is not configured with static website hosting, use the S3OriginConfig type instead.

" }, "ConnectionAttempts":{ "shape":"integer", @@ -5265,9 +5701,13 @@ "ConnectionTimeout":{ "shape":"integer", "documentation":"

The number of seconds that CloudFront waits when trying to establish a connection to the origin. The minimum timeout is 1 second, the maximum is 10 seconds, and the default (if you don’t specify otherwise) is 10 seconds.

For more information, see Origin Connection Timeout in the Amazon CloudFront Developer Guide.

" + }, + "OriginShield":{ + "shape":"OriginShield", + "documentation":"

CloudFront Origin Shield. Using Origin Shield can help reduce the load on your origin.

For more information, see Using Origin Shield in the Amazon CloudFront Developer Guide.

" } }, - "documentation":"

An origin.

An origin is the location where content is stored, and from which CloudFront gets content to serve to viewers. To specify an origin:

  • Use the S3OriginConfig type to specify an Amazon S3 bucket that is not configured with static website hosting.

  • Use the CustomOriginConfig type to specify various other kinds of content containers or HTTP servers, including:

    • An Amazon S3 bucket that is configured with static website hosting

    • An Elastic Load Balancing load balancer

    • An AWS Elemental MediaPackage origin

    • An AWS Elemental MediaStore container

    • Any other HTTP server, running on an Amazon EC2 instance or any other kind of host

For the current maximum number of origins that you can specify per distribution, see General Quotas on Web Distributions in the Amazon CloudFront Developer Guide (quotas were formerly referred to as limits).

" + "documentation":"

An origin.

An origin is the location where content is stored, and from which CloudFront gets content to serve to viewers. To specify an origin:

  • Use S3OriginConfig to specify an Amazon S3 bucket that is not configured with static website hosting.

  • Use CustomOriginConfig to specify all other kinds of origins, including:

    • An Amazon S3 bucket that is configured with static website hosting

    • An Elastic Load Balancing load balancer

    • An AWS Elemental MediaPackage endpoint

    • An AWS Elemental MediaStore container

    • Any other HTTP server, running on an Amazon EC2 instance or any other kind of host

For the current maximum number of origins that you can specify per distribution, see General Quotas on Web Distributions in the Amazon CloudFront Developer Guide (quotas were formerly referred to as limits).

" }, "OriginCustomHeader":{ "type":"structure", @@ -5601,6 +6041,27 @@ "custom" ] }, + "OriginShield":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"boolean", + "documentation":"

A flag that specifies whether Origin Shield is enabled.

When it’s enabled, CloudFront routes all requests through Origin Shield, which can help protect your origin. When it’s disabled, CloudFront might send requests directly to your origin from multiple edge locations or regional edge caches.

" + }, + "OriginShieldRegion":{ + "shape":"OriginShieldRegion", + "documentation":"

The AWS Region for Origin Shield.

Specify the AWS Region that has the lowest latency to your origin. To specify a region, use the region code, not the region name. For example, specify the US East (Ohio) region as us-east-2.

When you enable CloudFront Origin Shield, you must specify the AWS Region for Origin Shield. For the list of AWS Regions that you can specify, and for help choosing the best Region for your origin, see Choosing the AWS Region for Origin Shield in the Amazon CloudFront Developer Guide.

" + } + }, + "documentation":"

CloudFront Origin Shield.

Using Origin Shield can help reduce the load on your origin. For more information, see Using Origin Shield in the Amazon CloudFront Developer Guide.
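
A sketch of enabling Origin Shield on an origin with the generated Java model classes; the origin ID, bucket domain name, and Region code are hypothetical, and the S3 origin is configured without an origin access identity purely to keep the example short.

```java
import software.amazon.awssdk.services.cloudfront.model.Origin;
import software.amazon.awssdk.services.cloudfront.model.OriginShield;
import software.amazon.awssdk.services.cloudfront.model.S3OriginConfig;

// Enable Origin Shield in the Region with the lowest latency to the origin
// (use the Region code, not the Region name).
OriginShield originShield = OriginShield.builder()
        .enabled(true)
        .originShieldRegion("us-east-2")
        .build();

Origin origin = Origin.builder()
        .id("primary-s3-origin")                                    // hypothetical origin ID
        .domainName("example-bucket.s3.us-east-2.amazonaws.com")    // hypothetical bucket
        .s3OriginConfig(S3OriginConfig.builder()
                .originAccessIdentity("")                           // no OAI in this sketch
                .build())
        .originShield(originShield)
        .build();
```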

" + }, + "OriginShieldRegion":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[a-z]{2}-[a-z]+-\\d" + }, "OriginSslProtocols":{ "type":"structure", "required":[ @@ -5628,14 +6089,14 @@ "members":{ "Quantity":{ "shape":"integer", - "documentation":"

The number of origins or origin groups for this distribution.

" + "documentation":"

The number of origins for this distribution.

" }, "Items":{ "shape":"OriginList", - "documentation":"

A complex type that contains origins or origin groups for this distribution.

" + "documentation":"

A list of origins.

" } }, - "documentation":"

A complex type that contains information about origins and origin groups for this distribution.

" + "documentation":"

Contains information about the origins for this distribution.

" }, "ParametersInCacheKeyAndForwardedToOrigin":{ "type":"structure", @@ -5648,11 +6109,11 @@ "members":{ "EnableAcceptEncodingGzip":{ "shape":"boolean", - "documentation":"

A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.

This field is related to the EnableAcceptEncodingBrotli field. If one or both of these fields is true and the viewer request includes the Accept-Encoding header, then CloudFront does the following:

  • Normalizes the value of the viewer’s Accept-Encoding header

  • Includes the normalized header in the cache key

  • Includes the normalized header in the request to the origin, if a request is necessary

For more information, see Cache compressed objects in the Amazon CloudFront Developer Guide.

If you set this value to true, and this cache behavior also has an origin request policy attached, do not include the Accept-Encoding header in the origin request policy. CloudFront always includes the Accept-Encoding header in origin requests when the value of this field is true, so including this header in an origin request policy has no effect.

If both of these fields are false, then CloudFront treats the Accept-Encoding header the same as any other HTTP header in the viewer request. By default, it’s not included in the cache key and it’s not included in origin requests. In this case, you can manually add Accept-Encoding to the headers whitelist like any other HTTP header.

" + "documentation":"

A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.

This field is related to the EnableAcceptEncodingBrotli field. If one or both of these fields is true and the viewer request includes the Accept-Encoding header, then CloudFront does the following:

  • Normalizes the value of the viewer’s Accept-Encoding header

  • Includes the normalized header in the cache key

  • Includes the normalized header in the request to the origin, if a request is necessary

For more information, see Compression support in the Amazon CloudFront Developer Guide.

If you set this value to true, and this cache behavior also has an origin request policy attached, do not include the Accept-Encoding header in the origin request policy. CloudFront always includes the Accept-Encoding header in origin requests when the value of this field is true, so including this header in an origin request policy has no effect.

If both of these fields are false, then CloudFront treats the Accept-Encoding header the same as any other HTTP header in the viewer request. By default, it’s not included in the cache key and it’s not included in origin requests. In this case, you can manually add Accept-Encoding to the headers whitelist like any other HTTP header.

" }, "EnableAcceptEncodingBrotli":{ "shape":"boolean", - "documentation":"

A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.

This field is related to the EnableAcceptEncodingGzip field. If one or both of these fields is true and the viewer request includes the Accept-Encoding header, then CloudFront does the following:

  • Normalizes the value of the viewer’s Accept-Encoding header

  • Includes the normalized header in the cache key

  • Includes the normalized header in the request to the origin, if a request is necessary

For more information, see Cache compressed objects in the Amazon CloudFront Developer Guide.

If you set this value to true, and this cache behavior also has an origin request policy attached, do not include the Accept-Encoding header in the origin request policy. CloudFront always includes the Accept-Encoding header in origin requests when the value of this field is true, so including this header in an origin request policy has no effect.

If both of these fields are false, then CloudFront treats the Accept-Encoding header the same as any other HTTP header in the viewer request. By default, it’s not included in the cache key and it’s not included in origin requests. In this case, you can manually add Accept-Encoding to the headers whitelist like any other HTTP header.

" + "documentation":"

A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.

This field is related to the EnableAcceptEncodingGzip field. If one or both of these fields is true and the viewer request includes the Accept-Encoding header, then CloudFront does the following:

  • Normalizes the value of the viewer’s Accept-Encoding header

  • Includes the normalized header in the cache key

  • Includes the normalized header in the request to the origin, if a request is necessary

For more information, see Compression support in the Amazon CloudFront Developer Guide.

If you set this value to true, and this cache behavior also has an origin request policy attached, do not include the Accept-Encoding header in the origin request policy. CloudFront always includes the Accept-Encoding header in origin requests when the value of this field is true, so including this header in an origin request policy has no effect.

If both of these fields are false, then CloudFront treats the Accept-Encoding header the same as any other HTTP header in the viewer request. By default, it’s not included in the cache key and it’s not included in origin requests. In this case, you can manually add Accept-Encoding to the headers whitelist like any other HTTP header.

" }, "HeadersConfig":{ "shape":"CachePolicyHeadersConfig", @@ -5718,18 +6179,18 @@ "members":{ "Id":{ "shape":"string", - "documentation":"

A unique ID assigned to a public key you've added to CloudFront.

" + "documentation":"

The identifier of the public key.

" }, "CreatedTime":{ "shape":"timestamp", - "documentation":"

A time you added a public key to CloudFront.

" + "documentation":"

The date and time when the public key was uploaded.

" }, "PublicKeyConfig":{ "shape":"PublicKeyConfig", - "documentation":"

A complex data type for a public key you add to CloudFront to use with features like field-level encryption.

" + "documentation":"

Configuration information about a public key that you can use with signed URLs and signed cookies, or with field-level encryption.

" } }, - "documentation":"

A complex data type of public keys you add to CloudFront to use with features like field-level encryption.

" + "documentation":"

A public key that you can use with signed URLs and signed cookies, or with field-level encryption.

" }, "PublicKeyAlreadyExists":{ "type":"structure", @@ -5750,22 +6211,29 @@ "members":{ "CallerReference":{ "shape":"string", - "documentation":"

A unique number that ensures that the request can't be replayed.

" + "documentation":"

A string included in the request to help make sure that the request can’t be replayed.

" }, "Name":{ "shape":"string", - "documentation":"

The name for a public key you add to CloudFront to use with features like field-level encryption.

" + "documentation":"

A name to help identify the public key.

" }, "EncodedKey":{ "shape":"string", - "documentation":"

The encoded public key that you want to add to CloudFront to use with features like field-level encryption.

" + "documentation":"

The public key that you can use with signed URLs and signed cookies, or with field-level encryption.

" }, "Comment":{ "shape":"string", - "documentation":"

An optional comment about a public key.

" + "documentation":"

A comment to describe the public key.

" } }, - "documentation":"

Information about a public key you add to CloudFront to use with features like field-level encryption.

" + "documentation":"

Configuration information about a public key that you can use with signed URLs and signed cookies, or with field-level encryption.

" + }, + "PublicKeyIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"PublicKey" + } }, "PublicKeyInUse":{ "type":"structure", @@ -5789,18 +6257,18 @@ }, "MaxItems":{ "shape":"integer", - "documentation":"

The maximum number of public keys you want in the response body.

" + "documentation":"

The maximum number of public keys you want in the response.

" }, "Quantity":{ "shape":"integer", - "documentation":"

The number of public keys you added to CloudFront to use with features like field-level encryption.

" + "documentation":"

The number of public keys in the list.

" }, "Items":{ "shape":"PublicKeySummaryList", - "documentation":"

An array of information about a public key you add to CloudFront to use with features like field-level encryption.

" + "documentation":"

A list of public keys.

" } }, - "documentation":"

A list of public keys you've added to CloudFront to use with features like field-level encryption.

" + "documentation":"

A list of public keys that you can use with signed URLs and signed cookies, or with field-level encryption.

" }, "PublicKeySummary":{ "type":"structure", @@ -5813,26 +6281,26 @@ "members":{ "Id":{ "shape":"string", - "documentation":"

ID for public key information summary.

" + "documentation":"

The identifier of the public key.

" }, "Name":{ "shape":"string", - "documentation":"

Name for public key information summary.

" + "documentation":"

A name to help identify the public key.

" }, "CreatedTime":{ "shape":"timestamp", - "documentation":"

Creation time for public key information summary.

" + "documentation":"

The date and time when the public key was uploaded.

" }, "EncodedKey":{ "shape":"string", - "documentation":"

Encoded key for public key information summary.

" + "documentation":"

The public key.

" }, "Comment":{ "shape":"string", - "documentation":"

Comment for public key information summary.

" + "documentation":"

A comment to describe the public key.

" } }, - "documentation":"

A complex data type for public key information.

" + "documentation":"

Contains information about a public key.

" }, "PublicKeySummaryList":{ "type":"list", @@ -6057,6 +6525,15 @@ "type":"string", "pattern":"arn:aws(-cn)?:cloudfront::[0-9]+:.*" }, + "ResourceInUse":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

Cannot delete this resource because it is in use.

", + "error":{"httpStatusCode":409}, + "exception":true + }, "Restrictions":{ "type":"structure", "required":["GeoRestriction"], @@ -6110,14 +6587,14 @@ "members":{ "AwsAccountNumber":{ "shape":"string", - "documentation":"

An AWS account that is included in the TrustedSigners complex type for this distribution. Valid values include:

  • self, which is the AWS account used to create the distribution.

  • An AWS account number.

" + "documentation":"

An AWS account number that contains active CloudFront key pairs that CloudFront can use to verify the signatures of signed URLs and signed cookies. If the AWS account that owns the key pairs is the same account that owns the CloudFront distribution, the value of this field is self.

" }, "KeyPairIds":{ "shape":"KeyPairIds", - "documentation":"

A complex type that lists the active CloudFront key pairs, if any, that are associated with AwsAccountNumber.

" + "documentation":"

A list of CloudFront key pair identifiers.

" } }, - "documentation":"

A complex type that lists the AWS accounts that were included in the TrustedSigners complex type, as well as their active CloudFront key pair IDs, if any.

" + "documentation":"

A list of AWS accounts and the active CloudFront key pairs in each account that CloudFront can use to verify the signatures of signed URLs and signed cookies.

" }, "SignerList":{ "type":"list", @@ -6605,6 +7082,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "TooManyDistributionsAssociatedToKeyGroup":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The number of distributions that reference this key group is more than the maximum allowed. For more information, see Quotas (formerly known as limits) in the Amazon CloudFront Developer Guide.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "TooManyDistributionsAssociatedToOriginRequestPolicy":{ "type":"structure", "members":{ @@ -6722,6 +7208,24 @@ "error":{"httpStatusCode":400}, "exception":true }, + "TooManyKeyGroups":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

You have reached the maximum number of key groups for this AWS account. For more information, see Quotas (formerly known as limits) in the Amazon CloudFront Developer Guide.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TooManyKeyGroupsAssociatedToDistribution":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The number of key groups referenced by this distribution is more than the maximum allowed. For more information, see Quotas (formerly known as limits) in the Amazon CloudFront Developer Guide.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "TooManyLambdaFunctionAssociations":{ "type":"structure", "members":{ @@ -6776,6 +7280,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "TooManyPublicKeysInKeyGroup":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The number of public keys in this key group is more than the maximum allowed. For more information, see Quotas (formerly known as limits) in the Amazon CloudFront Developer Guide.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "TooManyQueryStringParameters":{ "type":"structure", "members":{ @@ -6839,6 +7352,44 @@ "error":{"httpStatusCode":400}, "exception":true }, + "TrustedKeyGroupDoesNotExist":{ + "type":"structure", + "members":{ + "Message":{"shape":"string"} + }, + "documentation":"

The specified key group does not exist.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "TrustedKeyGroupIdList":{ + "type":"list", + "member":{ + "shape":"string", + "locationName":"KeyGroup" + } + }, + "TrustedKeyGroups":{ + "type":"structure", + "required":[ + "Enabled", + "Quantity" + ], + "members":{ + "Enabled":{ + "shape":"boolean", + "documentation":"

This field is true if any of the key groups in the list have public keys that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.

" + }, + "Quantity":{ + "shape":"integer", + "documentation":"

The number of key groups in the list.

" + }, + "Items":{ + "shape":"TrustedKeyGroupIdList", + "documentation":"

A list of key group identifiers.

" + } + }, + "documentation":"

A list of key groups whose public keys CloudFront can use to verify the signatures of signed URLs and signed cookies.
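
A sketch of building this structure with the generated Java model class; the key group ID is hypothetical. A distribution's cache behavior would then reference the object (the cache behavior members are defined elsewhere in this model) so that CloudFront requires signed URLs or signed cookies verified against the group's public keys.

```java
import software.amazon.awssdk.services.cloudfront.model.TrustedKeyGroups;

TrustedKeyGroups trustedKeyGroups = TrustedKeyGroups.builder()
        .enabled(true)
        .quantity(1)
        .items("K3EXAMPLE33333")   // hypothetical key group ID
        .build();
```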

" + }, "TrustedSignerDoesNotExist":{ "type":"structure", "members":{ @@ -6857,18 +7408,18 @@ "members":{ "Enabled":{ "shape":"boolean", - "documentation":"

Specifies whether you want to require viewers to use signed URLs to access the files specified by PathPattern and TargetOriginId.

" + "documentation":"

This field is true if any of the AWS accounts have public keys that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.

" }, "Quantity":{ "shape":"integer", - "documentation":"

The number of trusted signers for this cache behavior.

" + "documentation":"

The number of AWS accounts in the list.

" }, "Items":{ "shape":"AwsAccountNumberList", - "documentation":"

Optional: A complex type that contains trusted signers for this cache behavior. If Quantity is 0, you can omit Items.

" + "documentation":"

A list of AWS account identifiers.

" } }, - "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

For more information about updating the distribution configuration, see DistributionConfig in the Amazon CloudFront API Reference.

" + "documentation":"

A list of AWS accounts whose public keys CloudFront can use to verify the signatures of signed URLs and signed cookies.

" }, "UntagResourceRequest":{ "type":"structure", @@ -7117,6 +7668,50 @@ }, "payload":"FieldLevelEncryptionProfile" }, + "UpdateKeyGroupRequest":{ + "type":"structure", + "required":[ + "KeyGroupConfig", + "Id" + ], + "members":{ + "KeyGroupConfig":{ + "shape":"KeyGroupConfig", + "documentation":"

The key group configuration.

", + "locationName":"KeyGroupConfig", + "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2020-05-31/"} + }, + "Id":{ + "shape":"string", + "documentation":"

The identifier of the key group that you are updating.

", + "location":"uri", + "locationName":"Id" + }, + "IfMatch":{ + "shape":"string", + "documentation":"

The version of the key group that you are updating. The version is the key group’s ETag value.

", + "location":"header", + "locationName":"If-Match" + } + }, + "payload":"KeyGroupConfig" + }, + "UpdateKeyGroupResult":{ + "type":"structure", + "members":{ + "KeyGroup":{ + "shape":"KeyGroup", + "documentation":"

The key group that was just updated.

" + }, + "ETag":{ + "shape":"string", + "documentation":"

The identifier for this version of the key group.
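
A sketch of the update flow, reusing the cloudFront client and the KeyGroupConfig pattern from the earlier examples; the key group ID and ETag are hypothetical, and the ETag would normally come from a prior read of the key group.

```java
import software.amazon.awssdk.services.cloudfront.model.KeyGroupConfig;
import software.amazon.awssdk.services.cloudfront.model.UpdateKeyGroupRequest;
import software.amazon.awssdk.services.cloudfront.model.UpdateKeyGroupResponse;

String currentEtag = "ETAGEXAMPLE123";              // hypothetical; returned by a prior read

UpdateKeyGroupResponse updated = cloudFront.updateKeyGroup(UpdateKeyGroupRequest.builder()
        .id("K3EXAMPLE33333")                       // identifier of the key group to update
        .ifMatch(currentEtag)                       // sent as the If-Match header
        .keyGroupConfig(KeyGroupConfig.builder()
                .name("example-key-group")
                .items("K1EXAMPLE11111")            // the full, updated list of public key IDs
                .build())
        .build());

String newEtag = updated.eTag();                    // identifier of the new version
```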

", + "location":"header", + "locationName":"ETag" + } + }, + "payload":"KeyGroup" + }, "UpdateOriginRequestPolicyRequest":{ "type":"structure", "required":[ @@ -7170,13 +7765,13 @@ "members":{ "PublicKeyConfig":{ "shape":"PublicKeyConfig", - "documentation":"

Request to update public key information.

", + "documentation":"

A public key configuration.

", "locationName":"PublicKeyConfig", "xmlNamespace":{"uri":"http://cloudfront.amazonaws.com/doc/2020-05-31/"} }, "Id":{ "shape":"string", - "documentation":"

ID of the public key to be updated.

", + "documentation":"

The identifier of the public key that you are updating.

", "location":"uri", "locationName":"Id" }, @@ -7194,11 +7789,11 @@ "members":{ "PublicKey":{ "shape":"PublicKey", - "documentation":"

Return the results of updating the public key.

" + "documentation":"

The public key.

" }, "ETag":{ "shape":"string", - "documentation":"

The current version of the update public key result. For example: E2QWRUHAPOMQZL.

", + "documentation":"

The identifier of the current version of the public key.

", "location":"header", "locationName":"ETag" } diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index 45eb0aad2831..c387ef3b7a16 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 657adc497f88..23917d95aa2c 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json b/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json index 00344f7caacc..02c5960077be 100644 --- a/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json @@ -189,6 +189,40 @@ ], "documentation":"

Gets a list of tags for the specified AWS CloudHSM cluster.

This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken (or an empty or null value), that means there are no more tags to get.

" }, + "ModifyBackupAttributes":{ + "name":"ModifyBackupAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyBackupAttributesRequest"}, + "output":{"shape":"ModifyBackupAttributesResponse"}, + "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"} + ], + "documentation":"

Modifies attributes for an AWS CloudHSM backup.

" + }, + "ModifyCluster":{ + "name":"ModifyCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyClusterRequest"}, + "output":{"shape":"ModifyClusterResponse"}, + "errors":[ + {"shape":"CloudHsmAccessDeniedException"}, + {"shape":"CloudHsmInternalFailureException"}, + {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmResourceNotFoundException"}, + {"shape":"CloudHsmServiceException"} + ], + "documentation":"

Modifies an AWS CloudHSM cluster.

" + }, "RestoreBackup":{ "name":"RestoreBackup", "http":{ @@ -268,6 +302,10 @@ "shape":"Timestamp", "documentation":"

The date and time when the backup was copied from a source backup.

" }, + "NeverExpires":{ + "shape":"Boolean", + "documentation":"

Specifies whether the service should exempt a backup from the retention policy for the cluster. True exempts a backup from the retention policy. False means the service applies the backup retention policy defined at the cluster.

" + }, "SourceRegion":{ "shape":"Region", "documentation":"

The AWS Region that contains the source backup from which the new backup was copied.

" @@ -289,7 +327,7 @@ "documentation":"

The list of tags for the backup.

" } }, - "documentation":"

Contains information about a backup of an AWS CloudHSM cluster. All backup objects contain the BackupId, BackupState, ClusterId, and CreateTimestamp parameters. Backups that were copied into a destination region additionally contain the CopyTimestamp, SourceBackup, SourceCluster, and SourceRegion paramters. A backup that is pending deletion will include the DeleteTimestamp parameter.

" + "documentation":"

Contains information about a backup of an AWS CloudHSM cluster. All backup objects contain the BackupId, BackupState, ClusterId, and CreateTimestamp parameters. Backups that were copied into a destination region additionally contain the CopyTimestamp, SourceBackup, SourceCluster, and SourceRegion parameters. A backup that is pending deletion will include the DeleteTimestamp parameter.

" }, "BackupId":{ "type":"string", @@ -299,6 +337,30 @@ "type":"string", "enum":["DEFAULT"] }, + "BackupRetentionPolicy":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"BackupRetentionType", + "documentation":"

The type of backup retention policy. For the DAYS type, the value is the number of days to retain backups.

" + }, + "Value":{ + "shape":"BackupRetentionValue", + "documentation":"

Use a value between 7 and 379.

" + } + }, + "documentation":"

A policy that defines the number of days to retain backups.

" + }, + "BackupRetentionType":{ + "type":"string", + "enum":["DAYS"] + }, + "BackupRetentionValue":{ + "type":"string", + "max":3, + "min":1, + "pattern":"[0-9]+" + }, "BackupState":{ "type":"string", "enum":[ @@ -312,6 +374,11 @@ "type":"list", "member":{"shape":"Backup"} }, + "BackupsMaxSize":{ + "type":"integer", + "max":50, + "min":1 + }, "Boolean":{"type":"boolean"}, "Cert":{ "type":"string", @@ -400,6 +467,10 @@ "shape":"BackupPolicy", "documentation":"

The cluster's backup policy.

" }, + "BackupRetentionPolicy":{ + "shape":"BackupRetentionPolicy", + "documentation":"

A policy that defines how the service retains backups.

" + }, "ClusterId":{ "shape":"ClusterId", "documentation":"

The cluster's identifier (ID).

" @@ -477,6 +548,11 @@ "type":"list", "member":{"shape":"Cluster"} }, + "ClustersMaxSize":{ + "type":"integer", + "max":25, + "min":1 + }, "CopyBackupToRegionRequest":{ "type":"structure", "required":[ @@ -510,13 +586,13 @@ "CreateClusterRequest":{ "type":"structure", "required":[ - "SubnetIds", - "HsmType" + "HsmType", + "SubnetIds" ], "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"

The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria:

  • All subnets must be in the same virtual private cloud (VPC).

  • You can specify only one subnet per Availability Zone.

" + "BackupRetentionPolicy":{ + "shape":"BackupRetentionPolicy", + "documentation":"

A policy that defines how the service retains backups.

" }, "HsmType":{ "shape":"HsmType", @@ -526,6 +602,10 @@ "shape":"BackupId", "documentation":"

The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups.

" }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria:

  • All subnets must be in the same virtual private cloud (VPC).

  • You can specify only one subnet per Availability Zone.

" + }, "TagList":{ "shape":"TagList", "documentation":"

Tags to apply to the CloudHSM cluster during creation.

" @@ -648,12 +728,12 @@ "documentation":"

The NextToken value that you received in the previous response. Use this value to get more backups.

" }, "MaxResults":{ - "shape":"MaxSize", + "shape":"BackupsMaxSize", "documentation":"

The maximum number of backups to return in the response. When there are more backups than the number you specify, the response contains a NextToken value.

" }, "Filters":{ "shape":"Filters", - "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the sourceBackupIds filter to return only the backups created from a source backup. The sourceBackupID of a source backup is returned by the CopyBackupToRegion operation.

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

" + "documentation":"

One or more filters to limit the items returned in the response.

Use the backupIds filter to return only the specified backups. Specify backups by their backup identifier (ID).

Use the sourceBackupIds filter to return only the backups created from a source backup. The sourceBackupID of a source backup is returned by the CopyBackupToRegion operation.

Use the clusterIds filter to return only the backups for the specified clusters. Specify clusters by their cluster identifier (ID).

Use the states filter to return only backups that match the specified state.

Use the neverExpires filter to return backups filtered by the value in the neverExpires parameter. True returns all backups exempt from the backup retention policy. False returns all backups with a backup retention policy defined at the cluster.

" }, "SortAscending":{ "shape":"Boolean", @@ -686,7 +766,7 @@ "documentation":"

The NextToken value that you received in the previous response. Use this value to get more clusters.

" }, "MaxResults":{ - "shape":"MaxSize", + "shape":"ClustersMaxSize", "documentation":"

The maximum number of clusters to return in the response. When there are more clusters than the number you specify, the response contains a NextToken value.

" } } @@ -885,6 +965,52 @@ "max":100, "min":1 }, + "ModifyBackupAttributesRequest":{ + "type":"structure", + "required":[ + "BackupId", + "NeverExpires" + ], + "members":{ + "BackupId":{ + "shape":"BackupId", + "documentation":"

The identifier (ID) of the backup to modify. To find the ID of a backup, use the DescribeBackups operation.

" + }, + "NeverExpires":{ + "shape":"Boolean", + "documentation":"

Specifies whether the service should exempt a backup from the retention policy for the cluster. True exempts a backup from the retention policy. False means the service applies the backup retention policy defined at the cluster.

" + } + } + }, + "ModifyBackupAttributesResponse":{ + "type":"structure", + "members":{ + "Backup":{"shape":"Backup"} + } + }, + "ModifyClusterRequest":{ + "type":"structure", + "required":[ + "BackupRetentionPolicy", + "ClusterId" + ], + "members":{ + "BackupRetentionPolicy":{ + "shape":"BackupRetentionPolicy", + "documentation":"

A policy that defines how the service retains backups.

" + }, + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

The identifier (ID) of the cluster that you want to modify. To find the cluster ID, use DescribeClusters.
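
A sketch of using the two new operations together once the Java client is regenerated from this model; the cluster and backup IDs are hypothetical.

```java
import software.amazon.awssdk.services.cloudhsmv2.CloudHsmV2Client;
import software.amazon.awssdk.services.cloudhsmv2.model.BackupRetentionPolicy;
import software.amazon.awssdk.services.cloudhsmv2.model.BackupRetentionType;
import software.amazon.awssdk.services.cloudhsmv2.model.ModifyBackupAttributesRequest;
import software.amazon.awssdk.services.cloudhsmv2.model.ModifyClusterRequest;

CloudHsmV2Client cloudHsm = CloudHsmV2Client.create();

// Retain the cluster's backups for 90 days (Value accepts 7-379 days).
cloudHsm.modifyCluster(ModifyClusterRequest.builder()
        .clusterId("cluster-1234567890a")           // hypothetical cluster ID
        .backupRetentionPolicy(BackupRetentionPolicy.builder()
                .type(BackupRetentionType.DAYS)
                .value("90")
                .build())
        .build());

// Exempt a single backup from that retention policy.
cloudHsm.modifyBackupAttributes(ModifyBackupAttributesRequest.builder()
        .backupId("backup-1234567890a")             // hypothetical backup ID
        .neverExpires(true)
        .build());
```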

" + } + } + }, + "ModifyClusterResponse":{ + "type":"structure", + "members":{ + "Cluster":{"shape":"Cluster"} + } + }, "NextToken":{ "type":"string", "max":256, diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 0018c3da83a1..c000a3f73403 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index 3ec9a4eb15b0..8a5b1457bc7f 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index 4f1139c0bb5f..d06973f96d64 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index 9b2edf9a3cd6..99868dd2b6bc 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -249,7 +249,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:

  • AWS access key

  • Event ID

  • Event name

  • Event source

  • Read only

  • Resource name

  • Resource type

  • User name

Lookup supports the following attributes for Insights events:

  • Event ID

  • Event name

  • Event source

All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

The rate of lookup requests is limited to two per second per account. If this limit is exceeded, a throttling error occurs.

", + "documentation":"

Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:

  • AWS access key

  • Event ID

  • Event name

  • Event source

  • Read only

  • Resource name

  • Resource type

  • User name

Lookup supports the following attributes for Insights events:

  • Event ID

  • Event name

  • Event source

All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

The rate of lookup requests is limited to two per second, per account, per region. If this limit is exceeded, a throttling error occurs.
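
A sketch of a lookup against one of the management-event attributes listed above, using the Java SDK v2 CloudTrail client; the event name is hypothetical.

```java
import software.amazon.awssdk.services.cloudtrail.CloudTrailClient;
import software.amazon.awssdk.services.cloudtrail.model.LookupAttribute;
import software.amazon.awssdk.services.cloudtrail.model.LookupAttributeKey;
import software.amazon.awssdk.services.cloudtrail.model.LookupEventsRequest;
import software.amazon.awssdk.services.cloudtrail.model.LookupEventsResponse;

CloudTrailClient cloudTrail = CloudTrailClient.create();

LookupEventsResponse lookup = cloudTrail.lookupEvents(LookupEventsRequest.builder()
        .lookupAttributes(LookupAttribute.builder()
                .attributeKey(LookupAttributeKey.EVENT_NAME)    // one attribute per request
                .attributeValue("ConsoleLogin")                 // hypothetical event name
                .build())
        .maxResults(50)                                         // 50 is also the maximum
        .build());

lookup.events().forEach(e -> System.out.println(e.eventTime() + " " + e.eventName()));
// lookup.nextToken(), when present, retrieves the next page of results.
```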

", "idempotent":true }, "PutEventSelectors":{ @@ -419,6 +419,39 @@ }, "documentation":"

Returns the objects or data listed below if successful. Otherwise, returns an error.

" }, + "AdvancedEventSelector":{ + "type":"structure", + "required":[ + "Name", + "FieldSelectors" + ], + "members":{ + "Name":{"shape":"SelectorName"}, + "FieldSelectors":{"shape":"AdvancedFieldSelectors"} + } + }, + "AdvancedEventSelectors":{ + "type":"list", + "member":{"shape":"AdvancedEventSelector"} + }, + "AdvancedFieldSelector":{ + "type":"structure", + "required":["Field"], + "members":{ + "Field":{"shape":"SelectorField"}, + "Equals":{"shape":"Operator"}, + "StartsWith":{"shape":"Operator"}, + "EndsWith":{"shape":"Operator"}, + "NotEquals":{"shape":"Operator"}, + "NotStartsWith":{"shape":"Operator"}, + "NotEndsWith":{"shape":"Operator"} + } + }, + "AdvancedFieldSelectors":{ + "type":"list", + "member":{"shape":"AdvancedFieldSelector"}, + "min":1 + }, "Boolean":{"type":"boolean"}, "ByteBuffer":{"type":"blob"}, "CloudTrailARNInvalidException":{ @@ -675,7 +708,7 @@ }, "IncludeManagementEvents":{ "shape":"Boolean", - "documentation":"

Specify if you want your event selector to include management events for your trail.

For more information, see Management Events in the AWS CloudTrail User Guide.

By default, the value is true.

" + "documentation":"

Specify if you want your event selector to include management events for your trail.

For more information, see Management Events in the AWS CloudTrail User Guide.

By default, the value is true.

The first copy of management events is free. You are charged for additional copies of management events that you are logging on any subsequent trail in the same region. For more information about CloudTrail pricing, see AWS CloudTrail Pricing.

" }, "DataResources":{ "shape":"DataResources", @@ -720,7 +753,8 @@ "EventSelectors":{ "shape":"EventSelectors", "documentation":"

The event selectors that are configured for the trail.

" - } + }, + "AdvancedEventSelectors":{"shape":"AdvancedEventSelectors"} } }, "GetInsightSelectorsRequest":{ @@ -1045,7 +1079,7 @@ "type":"structure", "members":{ }, - "documentation":"

This exception is thrown when the KMS key does not exist, or when the S3 bucket and the KMS key are not in the same region.

", + "documentation":"

This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not in the same region, or when the KMS key associated with the SNS topic either does not exist or is not in the same region.

", "exception":true }, "ListPublicKeysRequest":{ @@ -1237,6 +1271,17 @@ "documentation":"

This exception is thrown when the requested operation is not permitted.

", "exception":true }, + "Operator":{ + "type":"list", + "member":{"shape":"OperatorValue"}, + "min":1 + }, + "OperatorValue":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".+" + }, "OrganizationNotInAllFeaturesModeException":{ "type":"structure", "members":{ @@ -1279,10 +1324,7 @@ }, "PutEventSelectorsRequest":{ "type":"structure", - "required":[ - "TrailName", - "EventSelectors" - ], + "required":["TrailName"], "members":{ "TrailName":{ "shape":"String", @@ -1291,7 +1333,8 @@ "EventSelectors":{ "shape":"EventSelectors", "documentation":"

Specifies the settings for your event selectors. You can configure up to five event selectors for a trail.

" - } + }, + "AdvancedEventSelectors":{"shape":"AdvancedEventSelectors"} } }, "PutEventSelectorsResponse":{ @@ -1304,7 +1347,8 @@ "EventSelectors":{ "shape":"EventSelectors", "documentation":"

Specifies the event selectors configured for your trail.

" - } + }, + "AdvancedEventSelectors":{"shape":"AdvancedEventSelectors"} } }, "PutInsightSelectorsRequest":{ @@ -1428,6 +1472,18 @@ "documentation":"

This exception is thrown when the specified S3 bucket does not exist.

", "exception":true }, + "SelectorField":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"[\\w|\\d|\\.|_]+" + }, + "SelectorName":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".+" + }, "StartLoggingRequest":{ "type":"structure", "required":["Name"], diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index 51a0099944af..492eada45706 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json index 70cfd28330c1..7496cbb8e9c8 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json @@ -121,7 +121,7 @@ "shape":"DescribeAlarmsForMetricOutput", "resultWrapper":"DescribeAlarmsForMetricResult" }, - "documentation":"

Retrieves the alarms for the specified metric. To filter the results, specify a statistic, period, or unit.

" + "documentation":"

Retrieves the alarms for the specified metric. To filter the results, specify a statistic, period, or unit.

This operation retrieves only standard alarms that are based on the specified metric. It does not return alarms based on math expressions that use the specified metric, or composite alarms that use the specified metric.

" }, "DescribeAnomalyDetectors":{ "name":"DescribeAnomalyDetectors", @@ -155,7 +155,7 @@ "errors":[ {"shape":"InvalidNextToken"} ], - "documentation":"

Returns a list of all the Contributor Insights rules in your account. All rules in your account are returned with a single operation.

For more information about Contributor Insights, see Using Contributor Insights to Analyze High-Cardinality Data.

" + "documentation":"

Returns a list of all the Contributor Insights rules in your account.

For more information about Contributor Insights, see Using Contributor Insights to Analyze High-Cardinality Data.

" }, "DisableAlarmActions":{ "name":"DisableAlarmActions", @@ -437,7 +437,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal.

  • The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.

" + "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for GetMetricData or GetMetricStatistics.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

  • The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal.

  • The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
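
A sketch of publishing a single data point with the Java SDK v2 client; the namespace, metric name, and dimension values are hypothetical.

```java
import java.time.Instant;
import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
import software.amazon.awssdk.services.cloudwatch.model.Dimension;
import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest;
import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;

try (CloudWatchClient cloudWatch = CloudWatchClient.create()) {
    cloudWatch.putMetricData(PutMetricDataRequest.builder()
            .namespace("MyApp/Traffic")                  // hypothetical custom namespace
            .metricData(MetricDatum.builder()
                    .metricName("PageViews")             // hypothetical metric name
                    .timestamp(Instant.now())            // may be up to two weeks in the past
                    .unit(StandardUnit.COUNT)
                    .value(1.0)                          // a single data point in the Value field
                    .dimensions(Dimension.builder()
                            .name("Page")                // up to 10 Name/Value dimension pairs
                            .value("/index.html")
                            .build())
                    .build())
            .build());
}
```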

" }, "SetAlarmState":{ "name":"SetAlarmState", @@ -1176,11 +1176,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

Reserved for future use.

" + "documentation":"

Include this value, if it was returned by the previous operation, to get the next set of rules.

" }, "MaxResults":{ "shape":"InsightRuleMaxResults", - "documentation":"

This parameter is not currently used. Reserved for future use. If it is used in the future, the maximum value might be different.

" + "documentation":"

The maximum number of results to return in one operation. If you omit this parameter, the default of 500 is used.

" } } }, @@ -1189,7 +1189,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

Reserved for future use.

" + "documentation":"

If this parameter is present, it is a token that marks the start of the next batch of returned results.

" }, "InsightRules":{ "shape":"InsightRules", @@ -1210,7 +1210,7 @@ }, "Value":{ "shape":"DimensionValue", - "documentation":"

The value of the dimension.

" + "documentation":"

The value of the dimension. Dimension values cannot contain blank spaces or non-ASCII characters.

" } }, "documentation":"

A dimension is a name/value pair that is part of the identity of a metric. You can assign up to 10 dimensions to a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric.

", @@ -1458,7 +1458,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

Include this value, if it was returned by the previous call, to get the next set of data points.

" + "documentation":"

Include this value, if it was returned by the previous GetMetricData operation, to get the next set of data points.

" }, "ScanBy":{ "shape":"ScanBy", @@ -1914,15 +1914,15 @@ "members":{ "Namespace":{ "shape":"Namespace", - "documentation":"

The namespace to filter against.

" + "documentation":"

The metric namespace to filter against. Only the namespace that matches exactly will be returned.

" }, "MetricName":{ "shape":"MetricName", - "documentation":"

The name of the metric to filter against.

" + "documentation":"

The name of the metric to filter against. Only the metrics with names that match exactly will be returned.

" }, "Dimensions":{ "shape":"DimensionFilters", - "documentation":"

The dimensions to filter against.

" + "documentation":"

The dimensions to filter against. Only the dimensions that match exactly will be returned.
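
A sketch of an exact-match filter with the Java SDK v2 client and its generated paginator; the namespace, metric name, and dimension name are standard EC2 examples and are only illustrative.

```java
import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
import software.amazon.awssdk.services.cloudwatch.model.DimensionFilter;
import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse;

CloudWatchClient cloudWatch = CloudWatchClient.create();

ListMetricsRequest request = ListMetricsRequest.builder()
        .namespace("AWS/EC2")                   // exact namespace match
        .metricName("CPUUtilization")           // exact metric-name match
        .dimensions(DimensionFilter.builder()   // exact dimension match
                .name("InstanceId")
                .build())
        .build();

// The paginator follows NextToken automatically across pages.
for (ListMetricsResponse page : cloudWatch.listMetricsPaginator(request)) {
    page.metrics().forEach(m -> System.out.println(m.metricName() + " " + m.dimensions()));
}
```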

" }, "NextToken":{ "shape":"NextToken", @@ -2124,7 +2124,7 @@ }, "Metrics":{ "shape":"MetricDataQueries", - "documentation":"

An array of MetricDataQuery structures, used in an alarm based on a metric math expression. Each structure either retrieves a metric or performs a math expression. One item in the Metrics array is the math expression that the alarm watches. This expression by designated by having ReturnValue set to true.

" + "documentation":"

An array of MetricDataQuery structures, used in an alarm based on a metric math expression. Each structure either retrieves a metric or performs a math expression. One item in the Metrics array is the math expression that the alarm watches. This expression is designated by having ReturnData set to true.

" }, "ThresholdMetricId":{ "shape":"MetricId", @@ -2412,7 +2412,7 @@ }, "Configuration":{ "shape":"AnomalyDetectorConfiguration", - "documentation":"

The configuration specifies details about how the anomaly detection model is to be trained, including time ranges to exclude when training and updating the model. You can specify as many as 10 time ranges.

The configuration can also include the time zone to use for the metric.

You can in

" + "documentation":"

The configuration specifies details about how the anomaly detection model is to be trained, including time ranges to exclude when training and updating the model. You can specify as many as 10 time ranges.

The configuration can also include the time zone to use for the metric.

" } } }, @@ -2604,7 +2604,7 @@ }, "Metrics":{ "shape":"MetricDataQueries", - "documentation":"

An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array.

Each item in the Metrics array either retrieves a metric or performs a math expression.

One item in the Metrics array is the expression that the alarm watches. You designate this expression by setting ReturnValue to true for this object in the array. For more information, see MetricDataQuery.

If you use the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.

" + "documentation":"

An array of MetricDataQuery structures that enable you to create an alarm based on the result of a metric math expression. For each PutMetricAlarm operation, you must specify either MetricName or a Metrics array.

Each item in the Metrics array either retrieves a metric or performs a math expression.

One item in the Metrics array is the expression that the alarm watches. You designate this expression by setting ReturnData to true for this object in the array. For more information, see MetricDataQuery.

If you use the Metrics parameter, you cannot include the MetricName, Dimensions, Period, Namespace, Statistic, or ExtendedStatistic parameters of PutMetricAlarm in the same operation. Instead, you retrieve the metrics you are using in your math expression as part of the Metrics array.
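A minimal sketch of a metric-math alarm built from this Metrics array, assuming the AWS SDK for Java v2 CloudWatch client; the alarm name, metric, expression, and threshold are illustrative placeholders. Exactly one MetricDataQuery sets ReturnData to true, and the request omits MetricName, Namespace, Period, Statistic, and Dimensions.

```java
import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
import software.amazon.awssdk.services.cloudwatch.model.ComparisonOperator;
import software.amazon.awssdk.services.cloudwatch.model.Dimension;
import software.amazon.awssdk.services.cloudwatch.model.Metric;
import software.amazon.awssdk.services.cloudwatch.model.MetricDataQuery;
import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
import software.amazon.awssdk.services.cloudwatch.model.PutMetricAlarmRequest;

public class MetricMathAlarmExample {
    public static void main(String[] args) {
        try (CloudWatchClient cw = CloudWatchClient.create()) {
            // Retrieves the raw metric; ReturnData is false because the alarm
            // watches the math expression, not this query.
            MetricDataQuery raw = MetricDataQuery.builder()
                    .id("m1")
                    .metricStat(MetricStat.builder()
                            .metric(Metric.builder()
                                    .namespace("AWS/EC2")
                                    .metricName("CPUUtilization")
                                    .dimensions(Dimension.builder()
                                            .name("InstanceId")
                                            .value("i-0123456789abcdef0") // example instance
                                            .build())
                                    .build())
                            .period(300)
                            .stat("Average")
                            .build())
                    .returnData(false)
                    .build();

            // The single query with ReturnData = true is the expression the alarm watches.
            MetricDataQuery expression = MetricDataQuery.builder()
                    .id("e1")
                    .expression("FILL(m1, 0)")
                    .label("CPU utilization (gap-filled)")
                    .returnData(true)
                    .build();

            cw.putMetricAlarm(PutMetricAlarmRequest.builder()
                    .alarmName("high-cpu-math-alarm")   // example alarm name
                    .metrics(raw, expression)           // no MetricName/Namespace/Period here
                    .comparisonOperator(ComparisonOperator.GREATER_THAN_THRESHOLD)
                    .threshold(80.0)
                    .evaluationPeriods(3)
                    .build());
        }
    }
}
```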

" }, "Tags":{ "shape":"TagList", diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index 1e1c8f0ebc60..19b79b407884 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json b/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json index 2c9b6af2f718..fe273a226c05 100644 --- a/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json @@ -28,6 +28,40 @@ ], "documentation":"

Activates a partner event source that has been deactivated. Once activated, your matching event bus will start receiving events from the event source.

" }, + "CancelReplay":{ + "name":"CancelReplay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelReplayRequest"}, + "output":{"shape":"CancelReplayResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"IllegalStatusException"}, + {"shape":"InternalException"} + ], + "documentation":"

Cancels the specified replay.

" + }, + "CreateArchive":{ + "name":"CreateArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateArchiveRequest"}, + "output":{"shape":"CreateArchiveResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidEventPatternException"} + ], + "documentation":"

Creates an archive of events with the specified settings. When you create an archive, incoming events might not immediately start being sent to the archive. Allow a short period of time for changes to take effect. If you do not specify a pattern to filter events sent to the archive, all events are sent to the archive except replayed events. Replayed events are not sent to an archive.
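A minimal sketch of calling CreateArchive through the AWS SDK for Java v2 cloudwatchevents client generated from this model; the archive name, event bus ARN, and event pattern are assumptions used only for illustration.

```java
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.cloudwatchevents.model.CreateArchiveRequest;
import software.amazon.awssdk.services.cloudwatchevents.model.CreateArchiveResponse;

public class CreateArchiveExample {
    public static void main(String[] args) {
        try (CloudWatchEventsClient events = CloudWatchEventsClient.create()) {
            CreateArchiveResponse response = events.createArchive(CreateArchiveRequest.builder()
                    .archiveName("order-events-archive")                                        // example name
                    .eventSourceArn("arn:aws:events:us-east-1:123456789012:event-bus/default")  // example bus ARN
                    .description("Archive of order events for replay")
                    // Optional pattern; without it, all non-replayed events on the bus are archived.
                    .eventPattern("{\"source\":[\"com.example.orders\"]}")
                    .retentionDays(30)                                                          // 0 would retain indefinitely
                    .build());
            System.out.println("Archive state: " + response.state());
        }
    }
}
```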

" + }, "CreateEventBus":{ "name":"CreateEventBus", "http":{ @@ -80,6 +114,21 @@ ], "documentation":"

You can use this operation to temporarily stop receiving events from the specified partner event source. The matching event bus is not deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it is deleted.

To activate a deactivated partner event source, use ActivateEventSource.

" }, + "DeleteArchive":{ + "name":"DeleteArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteArchiveRequest"}, + "output":{"shape":"DeleteArchiveResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Deletes the specified archive.

" + }, "DeleteEventBus":{ "name":"DeleteEventBus", "http":{ @@ -122,6 +171,21 @@ ], "documentation":"

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.

" }, + "DescribeArchive":{ + "name":"DescribeArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeArchiveRequest"}, + "output":{"shape":"DescribeArchiveResponse"}, + "errors":[ + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves details about an archive.

" + }, "DescribeEventBus":{ "name":"DescribeEventBus", "http":{ @@ -166,6 +230,20 @@ ], "documentation":"

A SaaS partner can use this operation to list details about a partner event source that they have created. AWS customers do not use this operation. Instead, AWS customers can use DescribeEventSource to see details about a partner event source that is shared with them.

" }, + "DescribeReplay":{ + "name":"DescribeReplay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplayRequest"}, + "output":{"shape":"DescribeReplayResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves details about a replay. Use DescribeReplay to determine the progress of a running replay. A replay processes the events to replay based on the time in each event, and replays them in 1-minute intervals. If you use StartReplay and specify an EventStartTime and an EventEndTime that covers a 20-minute time range, the events are replayed from the first minute of that 20-minute range first, then the events from the second minute, and so on. The value returned for EventLastReplayedTime indicates the time within the specified time range associated with the last event replayed.
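A minimal sketch of polling DescribeReplay to track replay progress, assuming the SDK for Java v2 client generated from this model; the replay name and the polling interval are illustrative assumptions.

```java
import java.time.Duration;
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.cloudwatchevents.model.DescribeReplayRequest;
import software.amazon.awssdk.services.cloudwatchevents.model.DescribeReplayResponse;
import software.amazon.awssdk.services.cloudwatchevents.model.ReplayState;

public class ReplayProgressExample {
    public static void main(String[] args) throws InterruptedException {
        try (CloudWatchEventsClient events = CloudWatchEventsClient.create()) {
            DescribeReplayResponse replay;
            do {
                replay = events.describeReplay(DescribeReplayRequest.builder()
                        .replayName("order-events-replay")   // example replay name
                        .build());
                // EventLastReplayedTime shows how far into the requested time range
                // the replay has progressed (events are replayed in 1-minute slices).
                System.out.println(replay.state() + ", last replayed event time: "
                        + replay.eventLastReplayedTime());
                Thread.sleep(Duration.ofSeconds(30).toMillis());
            } while (replay.state() == ReplayState.STARTING || replay.state() == ReplayState.RUNNING);
        }
    }
}
```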

" + }, "DescribeRule":{ "name":"DescribeRule", "http":{ @@ -210,6 +288,20 @@ ], "documentation":"

Enables the specified rule. If the rule does not exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

" }, + "ListArchives":{ + "name":"ListArchives", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArchivesRequest"}, + "output":{"shape":"ListArchivesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Lists your archives. You can either list all the archives or you can provide a prefix to match to the archive names. Filter parameters are exclusive.

" + }, "ListEventBuses":{ "name":"ListEventBuses", "http":{ @@ -266,6 +358,19 @@ ], "documentation":"

A SaaS partner can use this operation to list all the partner event source names that they have created. This operation is not used by AWS customers.

" }, + "ListReplays":{ + "name":"ListReplays", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReplaysRequest"}, + "output":{"shape":"ListReplaysResponse"}, + "errors":[ + {"shape":"InternalException"} + ], + "documentation":"

Lists your replays. You can either list all the replays or you can provide a prefix to match to the replay names. Filter parameters are exclusive.

" + }, "ListRuleNamesByTarget":{ "name":"ListRuleNamesByTarget", "http":{ @@ -360,7 +465,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"PolicyLengthExceededException"}, {"shape":"InternalException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving at an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.
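A minimal sketch of granting every account in an AWS organization permission to put events to the default event bus, assuming the SDK for Java v2 client generated from this model; the statement ID and organization ID are placeholders.

```java
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.cloudwatchevents.model.Condition;
import software.amazon.awssdk.services.cloudwatchevents.model.PutPermissionRequest;

public class PutPermissionExample {
    public static void main(String[] args) {
        try (CloudWatchEventsClient events = CloudWatchEventsClient.create()) {
            // Principal "*" plus an aws:PrincipalOrgID condition grants the whole organization.
            events.putPermission(PutPermissionRequest.builder()
                    .statementId("AllowOrgToPutEvents")      // example statement ID
                    .action("events:PutEvents")
                    .principal("*")
                    .condition(Condition.builder()
                            .type("StringEquals")
                            .key("aws:PrincipalOrgID")
                            .value("o-a1b2c3d4e5")           // example organization ID
                            .build())
                    .build());
        }
    }
}
```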

" }, @@ -409,7 +515,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InternalException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Revokes the permission of another AWS account to be able to put events to the specified event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

" }, @@ -429,6 +536,23 @@ ], "documentation":"

Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.

After you remove a target, it might continue to be invoked when the associated rule triggers. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" }, + "StartReplay":{ + "name":"StartReplay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplayRequest"}, + "output":{"shape":"StartReplayResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"InvalidEventPatternException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalException"} + ], + "documentation":"

Starts the specified replay. Events are not necessarily replayed in the exact same order that they were added to the archive. A replay processes the events to replay based on the time in each event, and replays them in 1-minute intervals. If you specify an EventStartTime and an EventEndTime that covers a 20-minute time range, the events are replayed from the first minute of that 20-minute range first, then the events from the second minute, and so on. You can use DescribeReplay to determine the progress of a replay. The value returned for EventLastReplayedTime indicates the time within the specified time range associated with the last event replayed.
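A minimal sketch of starting a replay of a 20-minute window from an archive, assuming the SDK for Java v2 client generated from this model; the replay name, archive ARN, and destination event bus ARN are placeholders.

```java
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.cloudwatchevents.model.ReplayDestination;
import software.amazon.awssdk.services.cloudwatchevents.model.StartReplayRequest;
import software.amazon.awssdk.services.cloudwatchevents.model.StartReplayResponse;

public class StartReplayExample {
    public static void main(String[] args) {
        try (CloudWatchEventsClient events = CloudWatchEventsClient.create()) {
            Instant end = Instant.now();
            Instant start = end.minus(20, ChronoUnit.MINUTES);   // a 20-minute window, replayed minute by minute

            StartReplayResponse response = events.startReplay(StartReplayRequest.builder()
                    .replayName("order-events-replay")                                                        // example name
                    .eventSourceArn("arn:aws:events:us-east-1:123456789012:archive/order-events-archive")     // example archive ARN
                    .eventStartTime(start)
                    .eventEndTime(end)
                    .destination(ReplayDestination.builder()
                            // Must be the event bus the archive was created from.
                            .arn("arn:aws:events:us-east-1:123456789012:event-bus/default")
                            .build())
                    .build());
            System.out.println("Replay state: " + response.state());
        }
    }
}
```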

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -474,6 +598,23 @@ {"shape":"ManagedRuleException"} ], "documentation":"

Removes one or more tags from the specified EventBridge resource. In Amazon EventBridge (CloudWatch Events), rules and event buses can be tagged.

" + }, + "UpdateArchive":{ + "name":"UpdateArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateArchiveRequest"}, + "output":{"shape":"UpdateArchiveResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidEventPatternException"} + ], + "documentation":"

Updates the specified archive.

" } }, "shapes":{ @@ -499,6 +640,81 @@ } } }, + "Archive":{ + "type":"structure", + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event bus associated with the archive. Only events from this event bus are sent to the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The current state of the archive.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

A description for the reason that the archive is in the current state.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days to retain events in the archive before they are deleted.

" + }, + "SizeBytes":{ + "shape":"Long", + "documentation":"

The size of the archive, in bytes.

" + }, + "EventCount":{ + "shape":"Long", + "documentation":"

The number of events in the archive.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time stamp for the time that the archive was created.

" + } + }, + "documentation":"

An Archive object that contains details about an archive.

" + }, + "ArchiveArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:.+\\/.+$" + }, + "ArchiveDescription":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "ArchiveName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "ArchiveResponseList":{ + "type":"list", + "member":{"shape":"Archive"} + }, + "ArchiveState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "CREATING", + "UPDATING", + "CREATE_FAILED", + "UPDATE_FAILED" + ] + }, + "ArchiveStateReason":{ + "type":"string", + "max":512, + "pattern":".*" + }, "Arn":{ "type":"string", "max":1600, @@ -577,11 +793,38 @@ "documentation":"

The retry strategy to use for failed jobs, if the target is an AWS Batch job. If you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" }, "Boolean":{"type":"boolean"}, + "CancelReplayRequest":{ + "type":"structure", + "required":["ReplayName"], + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay to cancel.

" + } + } + }, + "CancelReplayResponse":{ + "type":"structure", + "members":{ + "ReplayArn":{ + "shape":"ReplayArn", + "documentation":"

The ARN of the replay to cancel.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The current state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

The reason that the replay is in the current state.

" + } + } + }, "ConcurrentModificationException":{ "type":"structure", "members":{ }, - "documentation":"

There is concurrent modification on a rule or target.

", + "documentation":"

There is concurrent modification on a rule, target, archive, or replay.

", "exception":true }, "Condition":{ @@ -607,6 +850,56 @@ }, "documentation":"

A JSON string which you can use to limit the event bus permissions you are granting to only accounts that fulfill the condition. Currently, the only supported condition is membership in a certain AWS organization. The string must contain Type, Key, and Value fields. The Value field specifies the ID of the AWS organization. Following is an example value for Condition:

'{\"Type\" : \"StringEquals\", \"Key\": \"aws:PrincipalOrgID\", \"Value\": \"o-1234567890\"}'

" }, + "CreateArchiveRequest":{ + "type":"structure", + "required":[ + "ArchiveName", + "EventSourceArn" + ], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name for the archive to create.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the archive.

" + }, + "Description":{ + "shape":"ArchiveDescription", + "documentation":"

A description for the archive.

" + }, + "EventPattern":{ + "shape":"EventPattern", + "documentation":"

An event pattern to use to filter events sent to the archive.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days to retain events for. The default value is 0. If set to 0, events are retained indefinitely.

" + } + } + }, + "CreateArchiveResponse":{ + "type":"structure", + "members":{ + "ArchiveArn":{ + "shape":"ArchiveArn", + "documentation":"

The ARN of the archive that was created.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive that was created.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

The reason that the archive is in the state.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the archive was created.

" + } + } + }, "CreateEventBusRequest":{ "type":"structure", "required":["Name"], @@ -660,6 +953,11 @@ } } }, + "CreatedBy":{ + "type":"string", + "max":128, + "min":1 + }, "Database":{ "type":"string", "max":64, @@ -692,6 +990,21 @@ }, "documentation":"

A DeadLetterConfig object that contains information about a dead-letter queue configuration.

" }, + "DeleteArchiveRequest":{ + "type":"structure", + "required":["ArchiveName"], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive to delete.

" + } + } + }, + "DeleteArchiveResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEventBusRequest":{ "type":"structure", "required":["Name"], @@ -728,8 +1041,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Force":{ "shape":"Boolean", @@ -737,12 +1050,71 @@ } } }, + "DescribeArchiveRequest":{ + "type":"structure", + "required":["ArchiveName"], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive to retrieve.

" + } + } + }, + "DescribeArchiveResponse":{ + "type":"structure", + "members":{ + "ArchiveArn":{ + "shape":"ArchiveArn", + "documentation":"

The ARN of the archive.

" + }, + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the archive.

" + }, + "Description":{ + "shape":"ArchiveDescription", + "documentation":"

The description of the archive.

" + }, + "EventPattern":{ + "shape":"EventPattern", + "documentation":"

The event pattern used to filter events sent to the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

The reason that the archive is in the state.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days to retain events in the archive.

" + }, + "SizeBytes":{ + "shape":"Long", + "documentation":"

The size of the archive in bytes.

" + }, + "EventCount":{ + "shape":"Long", + "documentation":"

The number of events in the archive.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the archive was created.

" + } + } + }, "DescribeEventBusRequest":{ "type":"structure", "members":{ "Name":{ - "shape":"EventBusName", - "documentation":"

The name of the event bus to show details for. If you omit this, the default event bus is displayed.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to show details for. If you omit this, the default event bus is displayed.

" } } }, @@ -825,6 +1197,69 @@ } } }, + "DescribeReplayRequest":{ + "type":"structure", + "required":["ReplayName"], + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay to retrieve.

" + } + } + }, + "DescribeReplayResponse":{ + "type":"structure", + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay.

" + }, + "ReplayArn":{ + "shape":"ReplayArn", + "documentation":"

The ARN of the replay.

" + }, + "Description":{ + "shape":"ReplayDescription", + "documentation":"

The description of the replay.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The current state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

The reason that the replay is in the current state.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the archive events were replayed from.

" + }, + "Destination":{ + "shape":"ReplayDestination", + "documentation":"

A ReplayDestination object that contains details about the replay.

" + }, + "EventStartTime":{ + "shape":"Timestamp", + "documentation":"

The time stamp of the first event that was last replayed from the archive.

" + }, + "EventEndTime":{ + "shape":"Timestamp", + "documentation":"

The time stamp for the last event that was replayed from the archive.

" + }, + "EventLastReplayedTime":{ + "shape":"Timestamp", + "documentation":"

The time that the event was last replayed.

" + }, + "ReplayStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay started.

" + }, + "ReplayEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay stopped.

" + } + } + }, "DescribeRuleRequest":{ "type":"structure", "required":["Name"], @@ -834,8 +1269,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } } }, @@ -876,7 +1311,11 @@ }, "EventBusName":{ "shape":"EventBusName", - "documentation":"

The event bus associated with the rule.

" + "documentation":"

The name of the event bus associated with the rule.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The account ID of the user that created the rule. If you use PutRule to put a rule on an event bus in another account, the other account is the owner of the rule, and the rule ARN includes the account ID for that account. However, the value for CreatedBy is the account ID of the account that created the rule in the other account.

" } } }, @@ -889,8 +1328,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } } }, @@ -934,8 +1373,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } } }, @@ -969,6 +1408,12 @@ "min":1, "pattern":"[/\\.\\-_A-Za-z0-9]+" }, + "EventBusNameOrArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[/\\.\\-_A-Za-z0-9]+" + }, "EventId":{"type":"string"}, "EventPattern":{"type":"string"}, "EventResource":{"type":"string"}, @@ -1064,6 +1509,13 @@ }, "documentation":"

These are custom parameter to be used when the target is an API Gateway REST APIs.

" }, + "IllegalStatusException":{ + "type":"structure", + "members":{ + }, + "documentation":"

An error occurred because a replay can be canceled only when the state is Running or Starting.

", + "exception":true + }, "InputTransformer":{ "type":"structure", "required":["InputTemplate"], @@ -1130,7 +1582,7 @@ "type":"structure", "members":{ }, - "documentation":"

You tried to create more rules or add more targets to a rule than is allowed.

", + "documentation":"

The request failed because it attempted to create a resource beyond the allowed service quota.

", "exception":true }, "LimitMax100":{ @@ -1142,6 +1594,44 @@ "type":"integer", "min":1 }, + "ListArchivesRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{ + "shape":"ArchiveName", + "documentation":"

A name prefix to filter the archives returned. Only archives with names that match the prefix are returned.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + }, + "Limit":{ + "shape":"LimitMax100", + "documentation":"

The maximum number of results to return.

" + } + } + }, + "ListArchivesResponse":{ + "type":"structure", + "members":{ + "Archives":{ + "shape":"ArchiveResponseList", + "documentation":"

An array of Archive objects that include details about an archive.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + } + } + }, "ListEventBusesRequest":{ "type":"structure", "members":{ @@ -1264,6 +1754,44 @@ } } }, + "ListReplaysRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{ + "shape":"ReplayName", + "documentation":"

A name prefix to filter the replays returned. Only replays with names that match the prefix are returned.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The state of the replay.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the replay.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + }, + "Limit":{ + "shape":"LimitMax100", + "documentation":"

The maximum number of replays to retrieve.

" + } + } + }, + "ListReplaysResponse":{ + "type":"structure", + "members":{ + "Replays":{ + "shape":"ReplayList", + "documentation":"

An array of Replay objects that contain information about the replay.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + } + } + }, "ListRuleNamesByTargetRequest":{ "type":"structure", "required":["TargetArn"], @@ -1273,8 +1801,8 @@ "documentation":"

The Amazon Resource Name (ARN) of the target resource.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

Limits the results to show only the rules associated with the specified event bus.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to list rules for. If you omit this, the default event bus is used.

" }, "NextToken":{ "shape":"NextToken", @@ -1307,8 +1835,8 @@ "documentation":"

The prefix matching the rule name.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

Limits the results to show only the rules associated with the specified event bus.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to list the rules for. If you omit this, the default event bus is used.

" }, "NextToken":{ "shape":"NextToken", @@ -1361,8 +1889,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "NextToken":{ "shape":"NextToken", @@ -1387,6 +1915,7 @@ } } }, + "Long":{"type":"long"}, "ManagedBy":{ "type":"string", "max":128, @@ -1431,6 +1960,12 @@ "min":1, "pattern":"[\\.\\-_A-Za-z0-9]+" }, + "NonPartnerEventBusNameOrArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[\\.\\-_A-Za-z0-9]+" + }, "OperationDisabledException":{ "type":"structure", "members":{ @@ -1543,8 +2078,8 @@ "documentation":"

A valid JSON string. There is no other schema imposed. The JSON string may contain fields and nested subobjects.

" }, "EventBusName":{ - "shape":"NonPartnerEventBusName", - "documentation":"

The event bus that will receive the event. Only the rules that are associated with this event bus will be able to match the event.

" + "shape":"NonPartnerEventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to receive the event. Only the rules that are associated with this event bus are used to match the event. If you omit this, the default event bus is used.

" } }, "documentation":"

Represents an event to be submitted.
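A minimal sketch of submitting such an entry with PutEvents, assuming the SDK for Java v2 client generated from this model; the source, detail type, detail payload, and bus name are placeholders (a full event bus ARN is also accepted for EventBusName after this change).

```java
import software.amazon.awssdk.services.cloudwatchevents.CloudWatchEventsClient;
import software.amazon.awssdk.services.cloudwatchevents.model.PutEventsRequest;
import software.amazon.awssdk.services.cloudwatchevents.model.PutEventsRequestEntry;
import software.amazon.awssdk.services.cloudwatchevents.model.PutEventsResponse;

public class PutEventsExample {
    public static void main(String[] args) {
        try (CloudWatchEventsClient events = CloudWatchEventsClient.create()) {
            PutEventsResponse response = events.putEvents(PutEventsRequest.builder()
                    .entries(PutEventsRequestEntry.builder()
                            .source("com.example.orders")            // example source
                            .detailType("OrderPlaced")
                            .detail("{\"orderId\":\"1234\"}")
                            .eventBusName("my-application-bus")      // name or full event bus ARN
                            .build())
                    .build());
            // FailedEntryCount is non-zero if any individual entry was rejected.
            System.out.println("Failed entries: " + response.failedEntryCount());
        }
    }
}
```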

" @@ -1669,15 +2204,10 @@ }, "PutPermissionRequest":{ "type":"structure", - "required":[ - "Action", - "Principal", - "StatementId" - ], "members":{ "EventBusName":{ "shape":"NonPartnerEventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "documentation":"

The name of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Action":{ "shape":"Action", @@ -1694,6 +2224,10 @@ "Condition":{ "shape":"Condition", "documentation":"

This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain AWS organization. For more information about AWS Organizations, see What Is AWS Organizations in the AWS Organizations User Guide.

If you specify Condition with an AWS organization ID, and specify \"*\" as the value for Principal, you grant permission to all the accounts in the named organization.

The Condition is a JSON string which must contain Type, Key, and Value fields.

" + }, + "Policy":{ + "shape":"String", + "documentation":"

A JSON string that describes the permission policy statement. You can include a Policy parameter in the request instead of using the StatementId, Action, Principal, or Condition parameters.

" } } }, @@ -1730,8 +2264,8 @@ "documentation":"

The list of key-value pairs to associate with the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus to associate with this rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to associate with this rule. If you omit this, the default event bus is used.

" } } }, @@ -1756,8 +2290,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The name of the event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Targets":{ "shape":"TargetList", @@ -1857,12 +2391,15 @@ }, "RemovePermissionRequest":{ "type":"structure", - "required":["StatementId"], "members":{ "StatementId":{ "shape":"StatementId", "documentation":"

The statement ID corresponding to the account that is no longer allowed to put events to the default event bus.

" }, + "RemoveAllPermissions":{ + "shape":"Boolean", + "documentation":"

Specifies whether to remove all permissions.

" + }, "EventBusName":{ "shape":"NonPartnerEventBusName", "documentation":"

The name of the event bus to revoke permissions for. If you omit this, the default event bus is used.

" @@ -1881,8 +2418,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The name of the event bus associated with the rule.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Ids":{ "shape":"TargetIdList", @@ -1929,6 +2466,104 @@ "type":"list", "member":{"shape":"RemoveTargetsResultEntry"} }, + "Replay":{ + "type":"structure", + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the archive to replay events from.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The current state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

A description of why the replay is in the current state.

" + }, + "EventStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to start replaying events. This is determined by the time in the event as described in Time.

" + }, + "EventEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to stop replaying events. Any event with a creation time prior to the EventEndTime specified is replayed.

" + }, + "EventLastReplayedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the last event was replayed.

" + }, + "ReplayStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay started.

" + }, + "ReplayEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay completed.

" + } + }, + "documentation":"

A Replay object that contains details about a replay.

" + }, + "ReplayArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:.+\\/[\\.\\-_A-Za-z0-9]+$" + }, + "ReplayDescription":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "ReplayDestination":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The ARN of the event bus to replay events to. You can replay events only to the event bus that was specified when the archive was created.

" + }, + "FilterArns":{ + "shape":"ReplayDestinationFilters", + "documentation":"

A list of ARNs for rules to replay events to.

" + } + }, + "documentation":"

A ReplayDestination object that contains details about a replay.

" + }, + "ReplayDestinationFilters":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "ReplayList":{ + "type":"list", + "member":{"shape":"Replay"} + }, + "ReplayName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "ReplayState":{ + "type":"string", + "enum":[ + "STARTING", + "RUNNING", + "CANCELLING", + "COMPLETED", + "CANCELLED", + "FAILED" + ] + }, + "ReplayStateReason":{ + "type":"string", + "max":512, + "pattern":".*" + }, "ResourceAlreadyExistsException":{ "type":"structure", "members":{ @@ -1948,6 +2583,10 @@ "documentation":"

An entity that you specified does not exist.

", "exception":true }, + "RetentionDays":{ + "type":"integer", + "min":0 + }, "RetryPolicy":{ "type":"structure", "members":{ @@ -2004,7 +2643,7 @@ }, "EventBusName":{ "shape":"EventBusName", - "documentation":"

The event bus associated with the rule.

" + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } }, "documentation":"

Contains information about a rule in Amazon EventBridge.

" @@ -2110,6 +2749,63 @@ }, "documentation":"

This structure includes the custom parameter to be used when the target is an SQS FIFO queue.

" }, + "StartReplayRequest":{ + "type":"structure", + "required":[ + "ReplayName", + "EventSourceArn", + "EventStartTime", + "EventEndTime", + "Destination" + ], + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay to start.

" + }, + "Description":{ + "shape":"ReplayDescription", + "documentation":"

A description for the replay to start.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the archive to replay events from.

" + }, + "EventStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to start replaying events. Only events that occurred between the EventStartTime and EventEndTime are replayed.

" + }, + "EventEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to stop replaying events. Only events that occurred between the EventStartTime and EventEndTime are replayed.

" + }, + "Destination":{ + "shape":"ReplayDestination", + "documentation":"

A ReplayDestination object that includes details about the destination for the replay.

" + } + } + }, + "StartReplayResponse":{ + "type":"structure", + "members":{ + "ReplayArn":{ + "shape":"ReplayArn", + "documentation":"

The ARN of the replay.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

The reason that the replay is in the state.

" + }, + "ReplayStartTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the replay started.

" + } + } + }, "StatementId":{ "type":"string", "max":64, @@ -2348,6 +3044,49 @@ "type":"structure", "members":{ } + }, + "UpdateArchiveRequest":{ + "type":"structure", + "required":["ArchiveName"], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive to update.

" + }, + "Description":{ + "shape":"ArchiveDescription", + "documentation":"

The description for the archive.

" + }, + "EventPattern":{ + "shape":"EventPattern", + "documentation":"

The event pattern to use to filter events sent to the archive.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days to retain events in the archive.

" + } + } + }, + "UpdateArchiveResponse":{ + "type":"structure", + "members":{ + "ArchiveArn":{ + "shape":"ArchiveArn", + "documentation":"

The ARN of the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

The reason that the archive is in the current state.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the archive was updated.

" + } + } } }, "documentation":"

Amazon EventBridge helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to:

  • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state.

  • Direct specific API records from AWS CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks.

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide.

" diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index 0e679e2715ca..ceda6068616a 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 8c451b66d941..b8009c79ebf4 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codeartifact/src/main/resources/codegen-resources/service-2.json b/services/codeartifact/src/main/resources/codegen-resources/service-2.json index 1504f3b5e14e..43b491adcad1 100644 --- a/services/codeartifact/src/main/resources/codegen-resources/service-2.json +++ b/services/codeartifact/src/main/resources/codegen-resources/service-2.json @@ -100,7 +100,6 @@ {"shape":"AccessDeniedException"}, {"shape":"ConflictException"}, {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], @@ -265,7 +264,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Deletes the assets in package versions and sets the package versions' status to Disposed. A disposed package version cannot be restored in your repository because its assets are deleted.

To view all disposed package versions in a repository, use ListackageVersions and set the status parameter to Disposed.

To view information about a disposed package version, use ListPackageVersions and set the status parameter to Disposed.

" + "documentation":"

Deletes the assets in package versions and sets the package versions' status to Disposed. A disposed package version cannot be restored in your repository because its assets are deleted.

To view all disposed package versions in a repository, use ListPackageVersions and set the status parameter to Disposed.

To view information about a disposed package version, use DescribePackageVersion.

" }, "GetAuthorizationToken":{ "name":"GetAuthorizationToken", @@ -282,7 +281,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Generates a temporary authentication token for accessing repositories in the domain. This API requires the codeartifact:GetAuthorizationToken and sts:GetServiceBearerToken permissions.

CodeArtifact authorization tokens are valid for a period of 12 hours when created with the login command. You can call login periodically to refresh the token. When you create an authorization token with the GetAuthorizationToken API, you can set a custom authorization period, up to a maximum of 12 hours, with the durationSeconds parameter.

The authorization period begins after login or GetAuthorizationToken is called. If login or GetAuthorizationToken is called while assuming a role, the token lifetime is independent of the maximum session duration of the role. For example, if you call sts assume-role and specify a session duration of 15 minutes, then generate a CodeArtifact authorization token, the token will be valid for the full authorization period even though this is longer than the 15-minute session duration.

See Using IAM Roles for more information on controlling session duration.

" + "documentation":"

Generates a temporary authorization token for accessing repositories in the domain. This API requires the codeartifact:GetAuthorizationToken and sts:GetServiceBearerToken permissions. For more information about authorization tokens, see AWS CodeArtifact authentication and tokens.

CodeArtifact authorization tokens are valid for a period of 12 hours when created with the login command. You can call login periodically to refresh the token. When you create an authorization token with the GetAuthorizationToken API, you can set a custom authorization period, up to a maximum of 12 hours, with the durationSeconds parameter.

The authorization period begins after login or GetAuthorizationToken is called. If login or GetAuthorizationToken is called while assuming a role, the token lifetime is independent of the maximum session duration of the role. For example, if you call sts assume-role and specify a session duration of 15 minutes, then generate a CodeArtifact authorization token, the token will be valid for the full authorization period even though this is longer than the 15-minute session duration.

See Using IAM Roles for more information on controlling session duration.
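A minimal sketch of requesting a CodeArtifact authorization token with a custom duration, assuming the SDK for Java v2 codeartifact client generated from this model; the domain, owner account, and duration are placeholders.

```java
import software.amazon.awssdk.services.codeartifact.CodeartifactClient;
import software.amazon.awssdk.services.codeartifact.model.GetAuthorizationTokenRequest;
import software.amazon.awssdk.services.codeartifact.model.GetAuthorizationTokenResponse;

public class AuthTokenExample {
    public static void main(String[] args) {
        try (CodeartifactClient codeartifact = CodeartifactClient.create()) {
            GetAuthorizationTokenResponse token = codeartifact.getAuthorizationToken(
                    GetAuthorizationTokenRequest.builder()
                            .domain("my-domain")             // example domain
                            .domainOwner("123456789012")     // example account ID
                            .durationSeconds(3600L)          // 900-43200 seconds, or 0 to match the role session
                            .build());
            System.out.println("Token expires at: " + token.expiration());
        }
    }
}
```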

" }, "GetDomainPermissionsPolicy":{ "name":"GetDomainPermissionsPolicy", @@ -314,7 +313,8 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], "documentation":"

Returns an asset (or file) that is in a package. For example, for a Maven package version, use GetPackageVersionAsset to download a JAR file, a POM file, or any other assets in the package version.

" }, @@ -350,7 +350,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

  • npm

  • pypi

  • maven

" + "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

  • npm

  • pypi

  • maven

  • nuget
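A minimal sketch of resolving the per-format endpoint listed above, assuming the SDK for Java v2 codeartifact client generated from this model; the domain and repository names are placeholders, and the NUGET enum value is available only in releases that include this model update.

```java
import software.amazon.awssdk.services.codeartifact.CodeartifactClient;
import software.amazon.awssdk.services.codeartifact.model.GetRepositoryEndpointRequest;
import software.amazon.awssdk.services.codeartifact.model.PackageFormat;

public class RepositoryEndpointExample {
    public static void main(String[] args) {
        try (CodeartifactClient codeartifact = CodeartifactClient.create()) {
            // Each package format (npm, pypi, maven, nuget) has its own endpoint.
            String npmEndpoint = codeartifact.getRepositoryEndpoint(
                    GetRepositoryEndpointRequest.builder()
                            .domain("my-domain")        // example domain
                            .repository("my-repo")      // example repository
                            .format(PackageFormat.NPM)
                            .build())
                    .repositoryEndpoint();
            System.out.println("npm endpoint: " + npmEndpoint);
        }
    }
}
```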

" }, "GetRepositoryPermissionsPolicy":{ "name":"GetRepositoryPermissionsPolicy", @@ -486,6 +486,22 @@ ], "documentation":"

Returns a list of RepositorySummary objects. Each RepositorySummary contains information about a repository in the specified domain and that matches the input parameters.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/v1/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeArtifact.

" + }, "PutDomainPermissionsPolicy":{ "name":"PutDomainPermissionsPolicy", "http":{ @@ -503,7 +519,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Sets a resource policy on a domain that specifies permissions to access it.

" + "documentation":"

Sets a resource policy on a domain that specifies permissions to access it.

When you call PutDomainPermissionsPolicy, the resource policy on the domain is ignored when evaluating permissions. This ensures that the owner of a domain cannot lock themselves out of the domain, which would prevent them from being able to update the resource policy.

" }, "PutRepositoryPermissionsPolicy":{ "name":"PutRepositoryPermissionsPolicy", @@ -522,7 +538,40 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Sets the resource policy on a repository that specifies permissions to access it.

" + "documentation":"

Sets the resource policy on a repository that specifies permissions to access it.

When you call PutRepositoryPermissionsPolicy, the resource policy on the repository is ignored when evaluating permissions. This ensures that the owner of a repository cannot lock themselves out of the repository, which would prevent them from being able to update the resource policy.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/v1/tag" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Adds or updates tags for a resource in AWS CodeArtifact.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/v1/untag" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes tags from a resource in AWS CodeArtifact.

" }, "UpdatePackageVersionsStatus":{ "name":"UpdatePackageVersionsStatus", @@ -581,7 +630,7 @@ }, "Arn":{ "type":"string", - "max":2048, + "max":1011, "min":1, "pattern":"\\S+" }, @@ -651,7 +700,7 @@ }, "externalConnection":{ "shape":"ExternalConnectionName", - "documentation":"

The name of the external connection to add to the repository. The following values are supported:

  • public:npmjs - for the npm public repository.

  • public:pypi - for the Python Package Index.

  • public:maven-central - for Maven Central.

  • public:maven-googleandroid - for the Google Android repository.

  • public:maven-gradleplugins - for the Gradle plugins repository.

  • public:maven-commonsware - for the CommonsWare Android repository.

", + "documentation":"

The name of the external connection to add to the repository. The following values are supported:

  • public:npmjs - for the npm public repository.

  • public:pypi - for the Python Package Index.

  • public:maven-central - for Maven Central.

  • public:maven-googleandroid - for the Google Android repository.

  • public:maven-gradleplugins - for the Gradle plugins repository.

  • public:maven-commonsware - for the CommonsWare Android repository.

  • public:nuget-org - for the NuGet Gallery.

", "location":"querystring", "locationName":"external-connection" } @@ -726,13 +775,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package that is copied. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "documentation":"

The format of the package that is copied. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

  • nuget: A NuGet package.

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -786,6 +835,10 @@ "encryptionKey":{ "shape":"Arn", "documentation":"

The encryption key for the domain. This is used to encrypt content stored in a domain. An encryption key can be a key ID, a key Amazon Resource Name (ARN), a key alias, or a key alias ARN. To specify an encryptionKey, your IAM role must have kms:DescribeKey and kms:CreateGrant permissions on the encryption key that is used. For more information, see DescribeKey in the AWS Key Management Service API Reference and AWS KMS API Permissions Reference in the AWS Key Management Service Developer Guide.

CodeArtifact supports only symmetric CMKs. Do not associate an asymmetric CMK with your domain. For more information, see Using symmetric and asymmetric keys in the AWS Key Management Service Developer Guide.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

One or more tag key-value pairs for the domain.

" } } }, @@ -830,6 +883,10 @@ "upstreams":{ "shape":"UpstreamRepositoryList", "documentation":"

A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

One or more tag key-value pairs for the repository.

" } } }, @@ -932,13 +989,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package versions to delete. The valid values are:

  • npm

  • pypi

  • maven

", + "documentation":"

The format of the package versions to delete. The valid values are:

  • npm

  • pypi

  • maven

  • nuget

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1103,13 +1160,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the requested package version. The valid values are:

  • npm

  • pypi

  • maven

", + "documentation":"

A format that specifies the type of the requested package version. The valid values are:

  • npm

  • pypi

  • maven

  • nuget

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1251,13 +1308,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of package versions you want to dispose. The valid values are:

  • npm

  • pypi

  • maven

", + "documentation":"

A format that specifies the type of package versions you want to dispose. The valid values are:

  • npm

  • pypi

  • maven

  • nuget

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1328,6 +1385,10 @@ "assetSizeBytes":{ "shape":"Long", "documentation":"

The total size of all assets in the domain.

" + }, + "s3BucketArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket that is used to store package assets in the domain.

" } }, "documentation":"

Information about a domain. A domain is a container for repositories. When you create a domain, it is empty until you add one or more repositories.

" @@ -1406,7 +1467,7 @@ }, "durationSeconds":{ "shape":"AuthorizationTokenDurationSeconds", - "documentation":"

The time, in seconds, that the generated authorization token is valid.

", + "documentation":"

The time, in seconds, that the generated authorization token is valid. Valid values are 0 and any number between 900 (15 minutes) and 43200 (12 hours). A value of 0 sets the authorization token to expire at the same time as the user's role's temporary credentials.
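
A minimal sketch of requesting a token with this parameter using the AWS SDK for Java v2; the client and model class names follow the SDK's standard code generation for this model, and the domain and owner values are placeholders:

    // Minimal sketch, assuming the standard generated CodeArtifact client and model classes.
    import software.amazon.awssdk.services.codeartifact.CodeartifactClient;
    import software.amazon.awssdk.services.codeartifact.model.GetAuthorizationTokenRequest;
    import software.amazon.awssdk.services.codeartifact.model.GetAuthorizationTokenResponse;

    public class GetTokenSketch {
        public static void main(String[] args) {
            try (CodeartifactClient codeartifact = CodeartifactClient.create()) {
                GetAuthorizationTokenResponse token = codeartifact.getAuthorizationToken(
                        GetAuthorizationTokenRequest.builder()
                                .domain("my-domain")         // placeholder domain name
                                .domainOwner("111122223333") // placeholder account ID
                                .durationSeconds(0L)         // 0 = expire with the caller's role session
                                .build());
                System.out.println("Token expires at " + token.expiration());
            }
        }
    }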

", "location":"querystring", "locationName":"duration" } @@ -1483,13 +1544,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package version with the requested asset file. The valid values are:

  • npm

  • pypi

  • maven

", + "documentation":"

A format that specifies the type of the package version with the requested asset file. The valid values are:

  • npm

  • pypi

  • maven

  • nuget

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1577,13 +1638,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package version with the requested readme file. The valid values are:

  • npm

  • pypi

  • maven

", + "documentation":"

A format that specifies the type of the package version with the requested readme file. The valid values are:

  • npm

  • pypi

  • maven

  • nuget

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1606,11 +1667,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package with the requested readme file. Valid format types are:

  • npm

  • pypi

  • maven

" + "documentation":"

The format of the package with the requested readme file. Valid format types are:

  • npm

  • pypi

  • maven

  • nuget

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -1658,7 +1719,7 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

Returns which endpoint of a repository to return. A repository has one endpoint for each package format:

  • npm

  • pypi

  • maven

", + "documentation":"

Specifies which of the repository's endpoints to return. A repository has one endpoint for each package format:

  • npm

  • pypi

  • maven

  • nuget
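
A minimal sketch, assuming the standard generated client and the PackageFormat enum defined later in this model, of fetching the new NuGet endpoint; the domain and repository names are placeholders:

    import software.amazon.awssdk.services.codeartifact.CodeartifactClient;
    import software.amazon.awssdk.services.codeartifact.model.GetRepositoryEndpointRequest;
    import software.amazon.awssdk.services.codeartifact.model.PackageFormat;

    public class NugetEndpointSketch {
        public static void main(String[] args) {
            try (CodeartifactClient codeartifact = CodeartifactClient.create()) {
                String endpoint = codeartifact.getRepositoryEndpoint(
                        GetRepositoryEndpointRequest.builder()
                                .domain("my-domain")         // placeholder
                                .repository("my-repo")       // placeholder
                                .format(PackageFormat.NUGET) // one endpoint per package format
                                .build())
                        .repositoryEndpoint();
                System.out.println(endpoint);
            }
        }
    }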

", "location":"querystring", "locationName":"format" } @@ -1820,13 +1881,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package that contains the returned package version assets. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "documentation":"

The format of the package that contains the returned package version assets. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

  • nuget: A NuGet package.

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1865,7 +1926,7 @@ }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -1919,13 +1980,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package with the requested dependencies. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "documentation":"

The format of the package with the requested dependencies. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

  • nuget: A NuGet package.

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -1954,11 +2015,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package that contains the returned dependencies. The valid values are:

  • npm

  • pypi

  • maven

" + "documentation":"

A format that specifies the type of the package that contains the returned dependencies. The valid values are:

  • npm

  • pypi

  • maven

  • nuget

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2016,13 +2077,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the returned packages. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "documentation":"

The format of the returned packages. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

  • nuget: A NuGet package.

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2067,11 +2128,11 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format of the package. Valid package format values are:

  • npm

  • pypi

  • maven

" + "documentation":"

A format of the package. Valid package format values are:

  • npm

  • pypi

  • maven

  • nuget

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2119,19 +2180,19 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

The format of the packages. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "documentation":"

The format of the packages. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

  • nuget: A NuGet package.

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, "packagePrefix":{ "shape":"PackageName", - "documentation":"

A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix are returned.

", + "documentation":"

A prefix used to filter returned packages. Only packages with names that start with packagePrefix are returned.
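
A minimal sketch, assuming the standard generated client, that combines the format and packagePrefix filters described above; the domain, repository, and prefix values are placeholders:

    import software.amazon.awssdk.services.codeartifact.CodeartifactClient;
    import software.amazon.awssdk.services.codeartifact.model.ListPackagesRequest;
    import software.amazon.awssdk.services.codeartifact.model.PackageFormat;

    public class ListPackagesSketch {
        public static void main(String[] args) {
            try (CodeartifactClient codeartifact = CodeartifactClient.create()) {
                codeartifact.listPackages(ListPackagesRequest.builder()
                                .domain("my-domain")          // placeholder
                                .repository("my-repo")        // placeholder
                                .format(PackageFormat.NUGET)  // NuGet packages have no namespace
                                .packagePrefix("Newtonsoft")  // only package names starting with this prefix
                                .build())
                        .packages()
                        .forEach(System.out::println);
            }
        }
    }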

", "location":"querystring", "locationName":"package-prefix" }, @@ -2263,6 +2324,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to get tags for.

", + "location":"querystring", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResult":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with the specified resource.

" + } + } + }, "Long":{"type":"long"}, "LongOptional":{"type":"long"}, "PackageDependency":{ @@ -2270,7 +2352,7 @@ "members":{ "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2296,7 +2378,8 @@ "enum":[ "npm", "pypi", - "maven" + "maven", + "nuget" ] }, "PackageName":{ @@ -2316,11 +2399,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package. Valid values are:

  • npm

  • pypi

  • maven

" + "documentation":"

The format of the package. Valid values are:

  • npm

  • pypi

  • maven

  • nuget

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

" }, "package":{ "shape":"PackageName", @@ -2344,11 +2427,11 @@ "members":{ "format":{ "shape":"PackageFormat", - "documentation":"

The format of the package version. The valid package formats are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

" + "documentation":"

The format of the package version. The valid package formats are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

  • nuget: A NuGet package.

" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

" }, "packageName":{ "shape":"PackageName", @@ -2425,7 +2508,8 @@ }, "PackageVersionList":{ "type":"list", - "member":{"shape":"PackageVersion"} + "member":{"shape":"PackageVersion"}, + "max":100 }, "PackageVersionRevision":{ "type":"string", @@ -2622,7 +2706,7 @@ }, "packageFormat":{ "shape":"PackageFormat", - "documentation":"

The package format associated with a repository's external connection. The valid package formats are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

" + "documentation":"

The package format associated with a repository's external connection. The valid package formats are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

  • nuget: A NuGet package.

" }, "status":{ "shape":"ExternalConnectionStatus", @@ -2765,6 +2849,70 @@ "key":{"shape":"PackageVersion"}, "value":{"shape":"SuccessfulPackageVersionInfo"} }, + "Tag":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"TagKey", + "documentation":"

The tag key.

" + }, + "value":{ + "shape":"TagValue", + "documentation":"

The tag value.

" + } + }, + "documentation":"

A tag is a key-value pair that can be used to manage, search for, or filter resources in AWS CodeArtifact.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to add or update tags for.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tags you want to modify or add to the resource.
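
A minimal sketch, assuming the standard generated client, of adding a tag and reading it back through the new TagResource and ListTagsForResource shapes; the resource ARN is a placeholder:

    import software.amazon.awssdk.services.codeartifact.CodeartifactClient;
    import software.amazon.awssdk.services.codeartifact.model.ListTagsForResourceRequest;
    import software.amazon.awssdk.services.codeartifact.model.Tag;
    import software.amazon.awssdk.services.codeartifact.model.TagResourceRequest;

    public class TaggingSketch {
        public static void main(String[] args) {
            String resourceArn = "arn:aws:codeartifact:us-west-2:111122223333:domain/my-domain"; // placeholder
            try (CodeartifactClient codeartifact = CodeartifactClient.create()) {
                // Add or update a tag; per this model, keys are 1-128 characters and values 0-256.
                codeartifact.tagResource(TagResourceRequest.builder()
                        .resourceArn(resourceArn)
                        .tags(Tag.builder().key("CostCenter").value("12345").build())
                        .build());

                // Read the tags back.
                codeartifact.listTagsForResource(ListTagsForResourceRequest.builder()
                                .resourceArn(resourceArn)
                                .build())
                        .tags()
                        .forEach(t -> System.out.println(t.key() + "=" + t.value()));
            }
        }
    }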

" + } + } + }, + "TagResourceResult":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "ThrottlingException":{ "type":"structure", "required":["message"], @@ -2782,6 +2930,30 @@ "exception":true }, "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to remove tags from.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag key for each tag that you want to remove from the resource.

" + } + } + }, + "UntagResourceResult":{ + "type":"structure", + "members":{ + } + }, "UpdatePackageVersionsStatusRequest":{ "type":"structure", "required":[ @@ -2813,13 +2985,13 @@ }, "format":{ "shape":"PackageFormat", - "documentation":"

A format that specifies the type of the package with the statuses to update. The valid values are:

  • npm

  • pypi

  • maven

", + "documentation":"

A format that specifies the type of the package with the statuses to update. The valid values are:

  • npm

  • pypi

  • maven

  • nuget

", "location":"querystring", "locationName":"format" }, "namespace":{ "shape":"PackageNamespace", - "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

  • A NuGet package does not contain a corresponding component, so NuGet packages do not have a namespace.

", "location":"querystring", "locationName":"namespace" }, @@ -2958,5 +3130,5 @@ ] } }, - "documentation":"

AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

AWS CodeArtifact Components

Use the information in this guide to help you work with the following CodeArtifact components:

  • Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the Maven CLI ( mvn ), and pip . You can create up to 100 repositories per AWS account.

  • Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in AWS Key Management Service (AWS KMS).

    Each repository is a member of a single domain and can't be moved to a different domain.

    The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.

    Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.

  • Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, and Maven package formats.

    In CodeArtifact, a package consists of:

    • A name (for example, webpack is the name of a popular npm package)

    • An optional namespace (for example, @types in @types/node)

    • A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.)

    • Package-level metadata (for example, npm tags)

  • Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.

  • Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.

  • Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files.

CodeArtifact supports these operations:

  • AssociateExternalConnection: Adds an existing external connection to a repository.

  • CopyPackageVersions: Copies package versions from one repository to another repository in the same domain.

  • CreateDomain: Creates a domain

  • CreateRepository: Creates a CodeArtifact repository in a domain.

  • DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories.

  • DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

  • DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.

  • DeleteRepository: Deletes a repository.

  • DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

  • DescribeDomain: Returns a DomainDescription object that contains information about the requested domain.

  • DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version.

  • DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository.

  • DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because they have been permanently removed from storage.

  • DisassociateExternalConnection: Removes an existing external connection from a repository.

  • GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours.

  • GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain.

  • GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

  • GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

  • GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

    • npm

    • pypi

    • maven

  • GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository.

  • ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain.

  • ListPackages: Lists the packages in a repository.

  • ListPackageVersionAssets: Lists the assets for a given package version.

  • ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version.

  • ListPackageVersions: Returns a list of package versions for a specified package in a repository.

  • ListRepositories: Returns a list of repositories owned by the AWS account that called this method.

  • ListRepositoriesInDomain: Returns a list of the repositories in a domain.

  • PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

  • PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it.

  • UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

  • UpdateRepository: Updates the properties of a repository.

" + "documentation":"

AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, NuGet, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

AWS CodeArtifact Components

Use the information in this guide to help you work with the following CodeArtifact components:

  • Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the NuGet CLI, the Maven CLI ( mvn ), and pip .

  • Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in AWS Key Management Service (AWS KMS).

    Each repository is a member of a single domain and can't be moved to a different domain.

    The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.

    Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.

  • Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, and NuGet package formats.

    In CodeArtifact, a package consists of:

    • A name (for example, webpack is the name of a popular npm package)

    • An optional namespace (for example, @types in @types/node)

    • A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.)

    • Package-level metadata (for example, npm tags)

  • Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.

  • Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.

  • Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files.

CodeArtifact supports these operations:

  • AssociateExternalConnection: Adds an existing external connection to a repository.

  • CopyPackageVersions: Copies package versions from one repository to another repository in the same domain.

  • CreateDomain: Creates a domain

  • CreateRepository: Creates a CodeArtifact repository in a domain.

  • DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories.

  • DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

  • DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.

  • DeleteRepository: Deletes a repository.

  • DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

  • DescribeDomain: Returns a DomainDescription object that contains information about the requested domain.

  • DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version.

  • DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository.

  • DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because its assets have been permanently removed from storage.

  • DisassociateExternalConnection: Removes an existing external connection from a repository.

  • GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires after the authorization period has passed. The default authorization period is 12 hours and can be customized to any length up to a maximum of 12 hours.

  • GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain.

  • GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

  • GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

  • GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

    • npm

    • pypi

    • maven

    • nuget

  • GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository.

  • ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain.

  • ListPackages: Lists the packages in a repository.

  • ListPackageVersionAssets: Lists the assets for a given package version.

  • ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version.

  • ListPackageVersions: Returns a list of package versions for a specified package in a repository.

  • ListRepositories: Returns a list of repositories owned by the AWS account that called this method.

  • ListRepositoriesInDomain: Returns a list of the repositories in a domain.

  • ListTagsForResource: Returns a list of the tags associated with a resource.

  • PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

  • PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it.

  • TagResource: Adds or updates tags for a resource.

  • UntagResource: Removes a tag from a resource.

  • UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

  • UpdateRepository: Updates the properties of a repository.

" } diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 7d36d6aad4f2..73e16bebfadb 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codebuild/src/main/resources/codegen-resources/service-2.json b/services/codebuild/src/main/resources/codegen-resources/service-2.json index 0f998c9741ee..208512eadedf 100644 --- a/services/codebuild/src/main/resources/codegen-resources/service-2.json +++ b/services/codebuild/src/main/resources/codegen-resources/service-2.json @@ -257,6 +257,19 @@ ], "documentation":"

Returns a list of details about test cases for a report.

" }, + "GetReportGroupTrend":{ + "name":"GetReportGroupTrend", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetReportGroupTrendInput"}, + "output":{"shape":"GetReportGroupTrendOutput"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "GetResourcePolicy":{ "name":"GetResourcePolicy", "http":{ @@ -497,7 +510,7 @@ {"shape":"InvalidInputException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Restarts a batch build.

" + "documentation":"

Restarts a failed batch build. Only batch builds that have failed can be retried.

" }, "StartBuild":{ "name":"StartBuild", @@ -1941,6 +1954,25 @@ "type":"list", "member":{"shape":"FilterGroup"} }, + "GetReportGroupTrendInput":{ + "type":"structure", + "required":[ + "reportGroupArn", + "trendField" + ], + "members":{ + "reportGroupArn":{"shape":"NonEmptyString"}, + "numOfReports":{"shape":"PageSize"}, + "trendField":{"shape":"ReportGroupTrendFieldType"} + } + }, + "GetReportGroupTrendOutput":{ + "type":"structure", + "members":{ + "stats":{"shape":"ReportGroupTrendStats"}, + "rawData":{"shape":"ReportGroupTrendRawDataList"} + } + }, "GetResourcePolicyInput":{ "type":"structure", "required":["resourceArn"], @@ -2766,7 +2798,7 @@ }, "modes":{ "shape":"ProjectCacheModes", - "documentation":"

If you use a LOCAL cache, the local cache mode. You can use one or more local cache modes at the same time.

  • LOCAL_SOURCE_CACHE mode caches Git metadata for primary and secondary sources. After the cache is created, subsequent builds pull only the change between commits. This mode is a good choice for projects with a clean working directory and a source that is a large Git repository. If you choose this option and your project does not use a Git repository (GitHub, GitHub Enterprise, or Bitbucket), the option is ignored.

  • LOCAL_DOCKER_LAYER_CACHE mode caches existing Docker layers. This mode is a good choice for projects that build or pull large Docker images. It can prevent the performance issues caused by pulling large Docker images down from the network.

    • You can use a Docker layer cache in the Linux environment only.

    • The privileged flag must be set so that your project has the required Docker permissions.

    • You should consider the security implications before you use a Docker layer cache.

  • LOCAL_CUSTOM_CACHE mode caches directories you specify in the buildspec file. This mode is a good choice if your build scenario is not suited to one of the other three local cache modes. If you use a custom cache:

    • Only directories can be specified for caching. You cannot specify individual files.

    • Symlinks are used to reference cached directories.

    • Cached directories are linked to your build before it downloads its project sources. Cached items are overridden if a source item has the same name. Directories are specified using cache paths in the buildspec file.

" + "documentation":"

An array of strings that specify the local cache modes. You can use one or more local cache modes at the same time. This is only used for LOCAL cache types.

Possible values are:

LOCAL_SOURCE_CACHE

Caches Git metadata for primary and secondary sources. After the cache is created, subsequent builds pull only the change between commits. This mode is a good choice for projects with a clean working directory and a source that is a large Git repository. If you choose this option and your project does not use a Git repository (GitHub, GitHub Enterprise, or Bitbucket), the option is ignored.

LOCAL_DOCKER_LAYER_CACHE

Caches existing Docker layers. This mode is a good choice for projects that build or pull large Docker images. It can prevent the performance issues caused by pulling large Docker images down from the network.

  • You can use a Docker layer cache in the Linux environment only.

  • The privileged flag must be set so that your project has the required Docker permissions.

  • You should consider the security implications before you use a Docker layer cache.

LOCAL_CUSTOM_CACHE

Caches directories you specify in the buildspec file. This mode is a good choice if your build scenario is not suited to one of the other local cache modes. If you use a custom cache:

  • Only directories can be specified for caching. You cannot specify individual files.

  • Symlinks are used to reference cached directories.

  • Cached directories are linked to your build before it downloads its project sources. Cached items are overridden if a source item has the same name. Directories are specified using cache paths in the buildspec file.
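
A minimal sketch, assuming the standard generated CodeBuild model classes and a LOCAL cache type, of combining two of the modes described above:

    import software.amazon.awssdk.services.codebuild.model.CacheMode;
    import software.amazon.awssdk.services.codebuild.model.CacheType;
    import software.amazon.awssdk.services.codebuild.model.ProjectCache;

    public class LocalCacheSketch {
        public static void main(String[] args) {
            ProjectCache cache = ProjectCache.builder()
                    .type(CacheType.LOCAL)
                    // Modes apply only to LOCAL caches and can be combined.
                    .modes(CacheMode.LOCAL_SOURCE_CACHE, CacheMode.LOCAL_DOCKER_LAYER_CACHE)
                    .build();
            System.out.println(cache);
        }
    }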

" } }, "documentation":"

Information about the cache for the build project.

" @@ -2794,7 +2826,7 @@ }, "image":{ "shape":"NonEmptyString", - "documentation":"

The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:

  • For an image tag: <registry>/<repository>:<tag>. For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be aws/codebuild/standard:4.0. To specify the latest version of this image, this would be aws/codebuild/standard:latest.

  • For an image digest: <registry>/<repository>@<digest>. For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use <registry>/<repository>@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf.

" + "documentation":"

The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:

  • For an image tag: <registry>/<repository>:<tag>. For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be aws/codebuild/standard:4.0.

  • For an image digest: <registry>/<repository>@<digest>. For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use <registry>/<repository>@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf.
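
A minimal sketch, assuming the standard generated CodeBuild model classes, showing both image reference forms from the list above; the environment type, compute type, and image values are illustrative:

    import software.amazon.awssdk.services.codebuild.model.ComputeType;
    import software.amazon.awssdk.services.codebuild.model.EnvironmentType;
    import software.amazon.awssdk.services.codebuild.model.ProjectEnvironment;

    public class ImageReferenceSketch {
        public static void main(String[] args) {
            // <registry>/<repository>:<tag> form, referencing a curated CodeBuild image by tag.
            ProjectEnvironment byTag = ProjectEnvironment.builder()
                    .type(EnvironmentType.LINUX_CONTAINER)
                    .computeType(ComputeType.BUILD_GENERAL1_SMALL)
                    .image("aws/codebuild/standard:4.0")
                    .build();

            // <registry>/<repository>@<digest> form, pinning an exact image version.
            ProjectEnvironment byDigest = byTag.toBuilder()
                    .image("aws/codebuild/standard@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf")
                    .build();

            System.out.println(byTag.image());
            System.out.println(byDigest.image());
        }
    }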

" }, "computeType":{ "shape":"ComputeType", @@ -2810,7 +2842,7 @@ }, "certificate":{ "shape":"String", - "documentation":"

The certificate to use with this build project.

" + "documentation":"

The ARN of the Amazon Simple Storage Service (Amazon S3) bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the AWS CodeBuild User Guide.

" }, "registryCredential":{ "shape":"RegistryCredential", @@ -2832,15 +2864,15 @@ }, "location":{ "shape":"String", - "documentation":"

A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name of file system when you view it in the AWS EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.

The directory path in the format efs-dns-name:/directory-path is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.

" + "documentation":"

A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name of the file system when you view it in the AWS EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.

The directory path in the format efs-dns-name:/directory-path is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.

" }, "mountPoint":{ "shape":"String", - "documentation":"

The location in the container where you mount the file system.

" + "documentation":"

The location in the container where you mount the file system.

" }, "identifier":{ "shape":"String", - "documentation":"

The name used to access a file system created by Amazon EFS. CodeBuild creates an environment variable by appending the identifier in all capital letters to CODEBUILD_. For example, if you specify my-efs for identifier, a new environment variable is create named CODEBUILD_MY-EFS.

The identifier is used to mount your file system.

" + "documentation":"

The name used to access a file system created by Amazon EFS. CodeBuild creates an environment variable by appending the identifier in all capital letters to CODEBUILD_. For example, if you specify my_efs for identifier, a new environment variable named CODEBUILD_MY_EFS is created.

The identifier is used to mount your file system.
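
A minimal sketch, assuming the standard generated CodeBuild model classes (including a FileSystemType.EFS enum value not shown in this hunk); the DNS name, directory, and identifier mirror the documentation above:

    import software.amazon.awssdk.services.codebuild.model.FileSystemType;
    import software.amazon.awssdk.services.codebuild.model.ProjectFileSystemLocation;

    public class EfsMountSketch {
        public static void main(String[] args) {
            ProjectFileSystemLocation efs = ProjectFileSystemLocation.builder()
                    .type(FileSystemType.EFS)
                    .location("fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory")
                    .mountPoint("/mnt/efs")   // where the file system appears in the build container
                    .identifier("my_efs")     // surfaced to the build as CODEBUILD_MY_EFS
                    .build();
            System.out.println(efs);
        }
    }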

" }, "mountOptions":{ "shape":"String", @@ -3126,7 +3158,8 @@ "tags":{ "shape":"TagList", "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" - } + }, + "status":{"shape":"ReportGroupStatusType"} }, "documentation":"

A series of reports. Each report contains information about the results from running a series of test cases. You specify the test cases for a report group in the buildspec for a build project using one or more paths to the test case files.

" }, @@ -3149,6 +3182,39 @@ "LAST_MODIFIED_TIME" ] }, + "ReportGroupStatusType":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "ReportGroupTrendFieldType":{ + "type":"string", + "enum":[ + "PASS_RATE", + "DURATION", + "TOTAL", + "LINE_COVERAGE", + "LINES_COVERED", + "LINES_MISSED", + "BRANCH_COVERAGE", + "BRANCHES_COVERED", + "BRANCHES_MISSED" + ] + }, + "ReportGroupTrendRawDataList":{ + "type":"list", + "member":{"shape":"ReportWithRawData"} + }, + "ReportGroupTrendStats":{ + "type":"structure", + "members":{ + "average":{"shape":"String"}, + "max":{"shape":"String"}, + "min":{"shape":"String"} + } + }, "ReportGroups":{ "type":"list", "member":{"shape":"ReportGroup"}, @@ -3184,6 +3250,13 @@ "CODE_COVERAGE" ] }, + "ReportWithRawData":{ + "type":"structure", + "members":{ + "reportArn":{"shape":"NonEmptyString"}, + "data":{"shape":"String"} + } + }, "Reports":{ "type":"list", "member":{"shape":"Report"}, diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index ac563bd83325..29e0e36b7267 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index e9bf111b26da..12d28dfba117 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index d3b552f227da..47199134dc80 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index 26be5b231991..942ce88e2617 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json b/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json index 626469815d88..09adc9daf5e3 100644 --- a/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json +++ b/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json @@ -46,7 +46,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Use to create a code review for a repository analysis.

" + "documentation":"

Use to create a code review with a CodeReviewType of RepositoryAnalysis. This type of code review analyzes all code under a specified branch in an associated repository. PullRequest code reviews are automatically triggered by a pull request, so they cannot be created using this method.

" }, "DescribeCodeReview":{ "name":"DescribeCodeReview", @@ -182,6 +182,21 @@ ], "documentation":"

Returns a list of RepositoryAssociationSummary objects that contain summary information about a repository association. You can filter the returned list by ProviderType , Name , State , and Owner .

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the list of tags associated with an associated repository resource.

" + }, "PutRecommendationFeedback":{ "name":"PutRecommendationFeedback", "http":{ @@ -198,6 +213,36 @@ {"shape":"ThrottlingException"} ], "documentation":"

Stores customer feedback for a CodeGuru Reviewer recommendation. When this API is called again with different reactions the previous feedback is overwritten.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds one or more tags to an associated repository.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes a tag from an associated repository.

" } }, "shapes":{ @@ -228,6 +273,10 @@ "shape":"ClientRequestToken", "documentation":"

Amazon CodeGuru Reviewer uses this value to prevent the accidental creation of duplicate repository associations if there are failures and retries.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

  • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

  • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

" } } }, @@ -237,9 +286,19 @@ "RepositoryAssociation":{ "shape":"RepositoryAssociation", "documentation":"

Information about the repository association.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

  • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

  • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

" } } }, + "AssociationArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws[^:\\s]*:codeguru-reviewer:[^:\\s]+:[\\d]{12}:association:[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, "AssociationId":{ "type":"string", "max":64, @@ -318,12 +377,16 @@ "shape":"SourceCodeType", "documentation":"

The type of the source code for the code review.

" }, + "AssociationArn":{ + "shape":"AssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation that contains the reviewed source code. You can retrieve associated repository ARNs by calling ListRepositoryAssociations .

" + }, "Metrics":{ "shape":"Metrics", "documentation":"

The statistics from the code review.

" } }, - "documentation":"

Information about a code review.

" + "documentation":"

Information about a code review. A code review belongs to the associated repository that contains the reviewed code.

" }, "CodeReviewName":{ "type":"string", @@ -391,7 +454,7 @@ "members":{ "RepositoryAnalysis":{ "shape":"RepositoryAnalysis", - "documentation":"

A code review that analyzes all code under a specified branch in an associated respository. The assocated repository is specified using its ARN in CreateCodeReview

" + "documentation":"

A code review that analyzes all code under a specified branch in an associated repository. The associated repository is specified using its ARN in CreateCodeReview .

" } }, "documentation":"

The type of a code review. There are two code review types:

  • PullRequest - A code review that is automatically triggered by a pull request on an associated repository. Because this type of code review is automatically generated, you cannot specify this code review type using CreateCodeReview .

  • RepositoryAnalysis - A code review that analyzes all code under a specified branch in an associated repository. The associated repository is specified using its ARN in CreateCodeReview .

" @@ -440,15 +503,15 @@ "members":{ "Name":{ "shape":"CodeReviewName", - "documentation":"

The name of the code review. Each code review of the same code review type must have a unique name in your AWS account.

" + "documentation":"

The name of the code review. The name of each code review in your AWS account must be unique.

" }, "RepositoryAssociationArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositories.

A code review can only be created on an associated repository. This is the ARN of the associated repository.

" + "shape":"AssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations .

A code review can only be created on an associated repository. This is the ARN of the associated repository.

" }, "Type":{ "shape":"CodeReviewType", - "documentation":"

The type of code review to create. This is specified using a CodeReviewType object.

" + "documentation":"

The type of code review to create. This is specified using a CodeReviewType object. You can create a code review only of type RepositoryAnalysis.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -525,8 +588,8 @@ "required":["AssociationArn"], "members":{ "AssociationArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositories.

", + "shape":"AssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations .

", "location":"uri", "locationName":"AssociationArn" } @@ -538,6 +601,10 @@ "RepositoryAssociation":{ "shape":"RepositoryAssociation", "documentation":"

Information about the repository association.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

  • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

  • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

" } } }, @@ -546,8 +613,8 @@ "required":["AssociationArn"], "members":{ "AssociationArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositories.

", + "shape":"AssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations .

", "location":"uri", "locationName":"AssociationArn" } @@ -559,6 +626,10 @@ "RepositoryAssociation":{ "shape":"RepositoryAssociation", "documentation":"

Information about the disassociated repository.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

  • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

  • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

" } } }, @@ -752,7 +823,7 @@ }, "States":{ "shape":"RepositoryAssociationStates", - "documentation":"

List of repository association states to use as a filter.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

", + "documentation":"

List of repository association states to use as a filter.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

  • Disassociated: CodeGuru Reviewer successfully disassociated the repository. You can create a new association with this repository if you want to review source code in it later. You can control access to code reviews created in an associated repository with tags after it has been disassociated. For more information, see Using tags to control access to associated repositories in the Amazon CodeGuru Reviewer User Guide.

", "location":"querystring", "locationName":"State" }, @@ -795,6 +866,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"AssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations .

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

  • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

  • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

" + } + } + }, "MaxResults":{ "type":"integer", "max":100, @@ -1078,7 +1170,7 @@ }, "State":{ "shape":"RepositoryAssociationState", - "documentation":"

The state of the repository association.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

" + "documentation":"

The state of the repository association.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

  • Disassociated: CodeGuru Reviewer successfully disassociated the repository. You can create a new association with this repository if you want to review source code in it later. You can control access to code reviews created in an associated repository with tags after it has been disassociated. For more information, see Using tags to control access to associated repositories in the Amazon CodeGuru Reviewer User Guide.

" }, "StateReason":{ "shape":"StateReason", @@ -1101,13 +1193,14 @@ "Associated", "Associating", "Failed", - "Disassociating" + "Disassociating", + "Disassociated" ] }, "RepositoryAssociationStates":{ "type":"list", "member":{"shape":"RepositoryAssociationState"}, - "max":3, + "max":5, "min":1 }, "RepositoryAssociationSummaries":{ @@ -1119,7 +1212,7 @@ "members":{ "AssociationArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositories.

" + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations .

" }, "ConnectionArn":{ "shape":"ConnectionArn", @@ -1147,7 +1240,7 @@ }, "State":{ "shape":"RepositoryAssociationState", - "documentation":"

The state of the repository association.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

" + "documentation":"

The state of the repository association.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

  • Disassociated: CodeGuru Reviewer successfully disassociated the repository. You can create a new association with this repository if you want to review source code in it later. You can control access to code reviews created in an associated repository with tags after it has been disassociated. For more information, see Using tags to control access to associated repositories in the Amazon CodeGuru Reviewer User Guide.

" } }, "documentation":"

Summary information about a repository association. The ListRepositoryAssociations operation returns a list of RepositoryAssociationSummary objects.

" @@ -1194,6 +1287,52 @@ "max":256, "min":0 }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "Tags" + ], + "members":{ + "resourceArn":{ + "shape":"AssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations .

", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:

  • A tag key (for example, CostCenter, Environment, Project, or Secret). Tag keys are case sensitive.

  • An optional field known as a tag value (for example, 111122223333, Production, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case sensitive.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "Text":{ "type":"string", "max":2048, @@ -1239,6 +1378,32 @@ "RepositoryAnalysis" ] }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "TagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"AssociationArn", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations .

", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of the keys for each tag you want to remove from an associated repository.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UserId":{ "type":"string", "max":256, @@ -1260,5 +1425,5 @@ "exception":true } }, - "documentation":"

This section provides documentation for the Amazon CodeGuru Reviewer API operations. CodeGuru Reviewer is a service that uses program analysis and machine learning to detect potential defects that are difficult for developers to find and recommends fixes in your Java code.

By proactively detecting and providing recommendations for addressing code defects and implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of your code base during the code review stage. For more information about CodeGuru Reviewer, see the Amazon CodeGuru Reviewer User Guide.

" + "documentation":"

This section provides documentation for the Amazon CodeGuru Reviewer API operations. CodeGuru Reviewer is a service that uses program analysis and machine learning to detect potential defects that are difficult for developers to find and recommends fixes in your Java code.

By proactively detecting and providing recommendations for addressing code defects and implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of your code base during the code review stage. For more information about CodeGuru Reviewer, see the Amazon CodeGuru Reviewer User Guide.

To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For more information, see CodeGuru Reviewer and interface VPC endpoints (AWS PrivateLink) in the Amazon CodeGuru Reviewer User Guide.

" } diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 627f61647c39..3e7a2ce4feea 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codepipeline/src/main/resources/codegen-resources/service-2.json b/services/codepipeline/src/main/resources/codegen-resources/service-2.json index 3ba303badb11..c858dd61d07a 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/service-2.json +++ b/services/codepipeline/src/main/resources/codegen-resources/service-2.json @@ -493,6 +493,7 @@ "output":{"shape":"RetryStageExecutionOutput"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"PipelineNotFoundException"}, {"shape":"StageNotFoundException"}, {"shape":"StageNotRetryableException"}, @@ -510,6 +511,7 @@ "output":{"shape":"StartPipelineExecutionOutput"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"PipelineNotFoundException"} ], "documentation":"

Starts the specified pipeline. Specifically, it begins processing the latest commit to the source location specified as part of the pipeline.

" @@ -524,6 +526,7 @@ "output":{"shape":"StopPipelineExecutionOutput"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"PipelineNotFoundException"}, {"shape":"PipelineExecutionNotStoppableException"}, {"shape":"DuplicatedStopRequestException"} @@ -838,6 +841,10 @@ "ActionExecution":{ "type":"structure", "members":{ + "actionExecutionId":{ + "shape":"ActionExecutionId", + "documentation":"

ID of the workflow action execution in the current stage. Use the GetPipelineState action to retrieve the current action execution details of the current stage.

For older executions, this field might be empty. The action execution ID is available for executions run on or after March 2020.

" + }, "status":{ "shape":"ActionExecutionStatus", "documentation":"

The status of the action, or for a completed action, the last status of the action.

" @@ -1148,11 +1155,11 @@ "members":{ "category":{ "shape":"ActionCategory", - "documentation":"

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.

" + "documentation":"

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.

  • Source

  • Build

  • Test

  • Deploy

  • Invoke

  • Approval

" }, "owner":{ "shape":"ActionOwner", - "documentation":"

The creator of the action being called.

" + "documentation":"

The creator of the action being called. There are three valid values for the Owner field in the action category section within your pipeline structure: AWS, ThirdParty, and Custom. For more information, see Valid Action Types and Providers in CodePipeline.

" }, "provider":{ "shape":"ActionProvider", @@ -1444,6 +1451,14 @@ "documentation":"

Unable to modify the tag due to a simultaneous update request.

", "exception":true }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

Your request cannot be handled because the pipeline is busy handling ongoing activities. Try again later.

", + "exception":true + }, "ContinuationToken":{ "type":"string", "max":2048, @@ -1461,7 +1476,7 @@ "members":{ "category":{ "shape":"ActionCategory", - "documentation":"

The category of the custom action, such as a build action or a test action.

Although Source and Approval are listed as valid values, they are not currently functional. These values are reserved for future use.

" + "documentation":"

The category of the custom action, such as a build action or a test action.

" }, "provider":{ "shape":"ActionProvider", @@ -1718,7 +1733,7 @@ }, "EncryptionKeyId":{ "type":"string", - "max":100, + "max":400, "min":1 }, "EncryptionKeyType":{ @@ -2534,7 +2549,7 @@ "members":{ "name":{ "shape":"PipelineName", - "documentation":"

The name of the action to be performed.

" + "documentation":"

The name of the pipeline.

" }, "roleArn":{ "shape":"RoleArn", @@ -3259,6 +3274,7 @@ "shape":"StageName", "documentation":"

The name of the stage.

" }, + "inboundExecution":{"shape":"StageExecution"}, "inboundTransitionState":{ "shape":"TransitionState", "documentation":"

The state of the inbound transition, which is either enabled or disabled.

" diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 0161c102db40..a8a5657ec7a9 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index 1772c3a07942..b51bd6954da9 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json index 8086a7f75c5b..7c3dce35a31a 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json @@ -91,7 +91,8 @@ "input":{"shape":"GetHostInput"}, "output":{"shape":"GetHostOutput"}, "errors":[ - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} ], "documentation":"

Returns the host ARN and details such as status, provider type, endpoint, and, if applicable, the VPC configuration.

" }, @@ -154,6 +155,22 @@ {"shape":"ResourceNotFoundException"} ], "documentation":"

Removes tags from an AWS resource.

" + }, + "UpdateHost":{ + "name":"UpdateHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateHostInput"}, + "output":{"shape":"UpdateHostOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"

Updates a specified host with the provided configurations.

" } }, "shapes":{ @@ -166,7 +183,16 @@ "AmazonResourceName":{ "type":"string", "max":1011, - "min":1 + "min":1, + "pattern":"arn:aws(-[\\w]+)*:.+:.+:[0-9]{12}:.+" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Two conflicting operations have been made on the same resource.

", + "exception":true }, "Connection":{ "type":"structure", @@ -181,7 +207,7 @@ }, "ProviderType":{ "shape":"ProviderType", - "documentation":"

The name of the external provider where your third-party code repository is configured. The valid provider type is Bitbucket.

" + "documentation":"

The name of the external provider where your third-party code repository is configured.

" }, "OwnerAccountId":{ "shape":"AccountId", @@ -211,7 +237,8 @@ "ConnectionName":{ "type":"string", "max":32, - "min":1 + "min":1, + "pattern":"[\\s\\S]*" }, "ConnectionStatus":{ "type":"string", @@ -227,7 +254,7 @@ "members":{ "ProviderType":{ "shape":"ProviderType", - "documentation":"

The name of the external provider where your third-party code repository is configured. The valid provider type is Bitbucket.

" + "documentation":"

The name of the external provider where your third-party code repository is configured.

" }, "ConnectionName":{ "shape":"ConnectionName", @@ -426,8 +453,9 @@ }, "HostName":{ "type":"string", - "max":32, - "min":1 + "max":64, + "min":1, + "pattern":".*" }, "HostStatus":{"type":"string"}, "HostStatusMessage":{"type":"string"}, @@ -526,7 +554,8 @@ "NextToken":{ "type":"string", "max":1024, - "min":1 + "min":1, + "pattern":".*" }, "ProviderType":{ "type":"string", @@ -554,6 +583,8 @@ }, "SecurityGroupId":{ "type":"string", + "max":20, + "min":11, "pattern":"sg-\\w{8}(\\w{9})?" }, "SecurityGroupIds":{ @@ -564,6 +595,8 @@ }, "SubnetId":{ "type":"string", + "max":24, + "min":15, "pattern":"subnet-\\w{8}(\\w{9})?" }, "SubnetIds":{ @@ -593,7 +626,8 @@ "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":".*" }, "TagKeyList":{ "type":"list", @@ -632,12 +666,22 @@ "TagValue":{ "type":"string", "max":256, - "min":0 + "min":0, + "pattern":".*" }, "TlsCertificate":{ "type":"string", "max":16384, - "min":1 + "min":1, + "pattern":"[\\s\\S]*" + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation is not supported. Check the connection status and try again.

", + "exception":true }, "UntagResourceInput":{ "type":"structure", @@ -661,10 +705,34 @@ "members":{ } }, + "UpdateHostInput":{ + "type":"structure", + "required":["HostArn"], + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

The Amazon Resource Name (ARN) of the host to be updated.

" + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

The URL or endpoint of the host to be updated.

" + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

The VPC configuration of the host to be updated. A VPC must be configured and the infrastructure to be represented by the host must already be connected to the VPC.

" + } + } + }, + "UpdateHostOutput":{ + "type":"structure", + "members":{ + } + }, "Url":{ "type":"string", "max":512, - "min":1 + "min":1, + "pattern":".*" }, "VpcConfiguration":{ "type":"structure", @@ -695,8 +763,10 @@ }, "VpcId":{ "type":"string", + "max":21, + "min":12, "pattern":"vpc-\\w{8}(\\w{9})?" } }, - "documentation":"AWS CodeStar Connections

The CodeStar Connections feature is in preview release and is subject to change.

This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations.

Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket Cloud app. When you create a connection, you can choose an existing installation or create one.

When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.

You can work with connections by calling:

  • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

  • DeleteConnection, which deletes the specified connection.

  • GetConnection, which returns information about the connection, including the connection status.

  • ListConnections, which lists the connections associated with your account.

You can work with hosts by calling:

  • CreateHost, which creates a host that represents the infrastructure where your provider is installed.

  • DeleteHost, which deletes the specified host.

  • GetHost, which returns information about the host, including the setup status.

  • ListHosts, which lists the hosts associated with your account.

You can work with tags in AWS CodeStar Connections by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeStar Connections.

  • TagResource, which adds or updates tags for a resource in AWS CodeStar Connections.

  • UntagResource, which removes tags for a resource in AWS CodeStar Connections.

For information about how to use AWS CodeStar Connections, see the Developer Tools User Guide.

" + "documentation":"AWS CodeStar Connections

This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations.

Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one.

When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.

You can work with connections by calling:

  • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

  • DeleteConnection, which deletes the specified connection.

  • GetConnection, which returns information about the connection, including the connection status.

  • ListConnections, which lists the connections associated with your account.

You can work with hosts by calling:

  • CreateHost, which creates a host that represents the infrastructure where your provider is installed.

  • DeleteHost, which deletes the specified host.

  • GetHost, which returns information about the host, including the setup status.

  • ListHosts, which lists the hosts associated with your account.

You can work with tags in AWS CodeStar Connections by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeStar Connections.

  • TagResource, which adds or updates tags for a resource in AWS CodeStar Connections.

  • UntagResource, which removes tags for a resource in AWS CodeStar Connections.

For information about how to use AWS CodeStar Connections, see the Developer Tools User Guide.

" } diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index 0cb185539bda..f3ebcd8376fa 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index a7958cebcdf0..a78d861e2a25 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentity/src/main/resources/codegen-resources/paginators-1.json b/services/cognitoidentity/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..0e02fc699b91 100644 --- a/services/cognitoidentity/src/main/resources/codegen-resources/paginators-1.json +++ b/services/cognitoidentity/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,10 @@ { "pagination": { + "ListIdentityPools": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "IdentityPools" + } } -} +} \ No newline at end of file diff --git a/services/cognitoidentity/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentity/src/main/resources/codegen-resources/service-2.json index 3ee6ced33d63..1e23304e3542 100644 --- a/services/cognitoidentity/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentity/src/main/resources/codegen-resources/service-2.json @@ -113,7 +113,8 @@ {"shape":"InternalErrorException"}, {"shape":"ExternalServiceException"} ], - "documentation":"

Returns credentials for the provided identity ID. Any provided logins will be validated against supported login providers. If the token is for cognito-identity.amazonaws.com, it will be passed through to AWS Security Token Service with the appropriate role for the token.

This is a public API. You do not need any credentials to call this API.

" + "documentation":"

Returns credentials for the provided identity ID. Any provided logins will be validated against supported login providers. If the token is for cognito-identity.amazonaws.com, it will be passed through to AWS Security Token Service with the appropriate role for the token.

This is a public API. You do not need any credentials to call this API.

", + "authtype":"none" }, "GetId":{ "name":"GetId", @@ -133,7 +134,8 @@ {"shape":"LimitExceededException"}, {"shape":"ExternalServiceException"} ], - "documentation":"

Generates (or retrieves) a Cognito ID. Supplying multiple logins will create an implicit linked account.

This is a public API. You do not need any credentials to call this API.

" + "documentation":"

Generates (or retrieves) a Cognito ID. Supplying multiple logins will create an implicit linked account.

This is a public API. You do not need any credentials to call this API.

", + "authtype":"none" }, "GetIdentityPoolRoles":{ "name":"GetIdentityPoolRoles", @@ -170,7 +172,8 @@ {"shape":"InternalErrorException"}, {"shape":"ExternalServiceException"} ], - "documentation":"

Gets an OpenID token, using a known Cognito ID. This known Cognito ID is returned by GetId. You can optionally add additional logins for the identity. Supplying multiple logins creates an implicit link.

The OpenId token is valid for 10 minutes.

This is a public API. You do not need any credentials to call this API.

" + "documentation":"

Gets an OpenID token, using a known Cognito ID. This known Cognito ID is returned by GetId. You can optionally add additional logins for the identity. Supplying multiple logins creates an implicit link.

The OpenId token is valid for 10 minutes.

This is a public API. You do not need any credentials to call this API.

", + "authtype":"none" }, "GetOpenIdTokenForDeveloperIdentity":{ "name":"GetOpenIdTokenForDeveloperIdentity", @@ -346,7 +349,8 @@ {"shape":"InternalErrorException"}, {"shape":"ExternalServiceException"} ], - "documentation":"

Unlinks a federated identity from an existing account. Unlinked logins will be considered new identities next time they are seen. Removing the last linked login will make this identity inaccessible.

This is a public API. You do not need any credentials to call this API.

" + "documentation":"

Unlinks a federated identity from an existing account. Unlinked logins will be considered new identities next time they are seen. Removing the last linked login will make this identity inaccessible.

This is a public API. You do not need any credentials to call this API.

", + "authtype":"none" }, "UntagResource":{ "name":"UntagResource", @@ -1176,7 +1180,7 @@ "MappingRulesList":{ "type":"list", "member":{"shape":"MappingRule"}, - "max":25, + "max":400, "min":1 }, "MergeDeveloperIdentitiesInput":{ @@ -1235,6 +1239,7 @@ "OIDCToken":{"type":"string"}, "PaginationKey":{ "type":"string", + "max":65535, "min":1, "pattern":"[\\S]+" }, @@ -1299,6 +1304,8 @@ }, "RoleType":{ "type":"string", + "max":128, + "min":1, "pattern":"(un)?authenticated" }, "RolesMap":{ diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index d653575c53bb..c0c2918374ad 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json index f2ac09aaea18..dd56c8251fcd 100755 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json @@ -1618,7 +1618,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are enabled and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are enabled. If multiple options are enabled and no preference is set, a challenge to choose an MFA option will be returned during sign in.

" + "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are enabled and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are enabled. If multiple options are enabled and no preference is set, a challenge to choose an MFA option will be returned during sign in. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.

" }, "SetUserPoolMfaConfig":{ "name":"SetUserPoolMfaConfig", @@ -3941,11 +3941,11 @@ }, "EmailVerificationMessage":{ "shape":"EmailVerificationMessageType", - "documentation":"

A string representing the email verification message.

" + "documentation":"

A string representing the email verification message. EmailVerificationMessage is allowed only if EmailSendingAccount is DEVELOPER.

" }, "EmailVerificationSubject":{ "shape":"EmailVerificationSubjectType", - "documentation":"

A string representing the email verification subject.

" + "documentation":"

A string representing the email verification subject. EmailVerificationSubject is allowed only if EmailSendingAccount is DEVELOPER.

" }, "VerificationMessageTemplate":{ "shape":"VerificationMessageTemplateType", @@ -4031,6 +4031,50 @@ }, "documentation":"

The configuration for a custom domain that hosts the sign-up and sign-in webpages for your application.

" }, + "CustomEmailLambdaVersionConfigType":{ + "type":"structure", + "required":[ + "LambdaVersion", + "LambdaArn" + ], + "members":{ + "LambdaVersion":{ + "shape":"CustomEmailSenderLambdaVersionType", + "documentation":"

The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom email Lambda function. The only supported value is V1_0.

" + }, + "LambdaArn":{ + "shape":"ArnType", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function that Amazon Cognito triggers to send email notifications to users.

" + } + }, + "documentation":"

A custom email sender Lambda configuration type.

" + }, + "CustomEmailSenderLambdaVersionType":{ + "type":"string", + "enum":["V1_0"] + }, + "CustomSMSLambdaVersionConfigType":{ + "type":"structure", + "required":[ + "LambdaVersion", + "LambdaArn" + ], + "members":{ + "LambdaVersion":{ + "shape":"CustomSMSSenderLambdaVersionType", + "documentation":"

The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS Lambda function. The only supported value is V1_0.

" + }, + "LambdaArn":{ + "shape":"ArnType", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function that Amazon Cognito triggers to send SMS notifications to users.

" + } + }, + "documentation":"

A custom SMS sender Lambda configuration type.

" + }, + "CustomSMSSenderLambdaVersionType":{ + "type":"string", + "enum":["V1_0"] + }, "DateType":{"type":"timestamp"}, "DefaultEmailOptionType":{ "type":"string", @@ -4525,7 +4569,7 @@ }, "EmailSendingAccount":{ "shape":"EmailSendingAccountType", - "documentation":"

Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:

COGNITO_DEFAULT

When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.

To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.

The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

DEVELOPER

When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.

If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.

" + "documentation":"

Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:

COGNITO_DEFAULT

When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.

To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide.

The default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't allowed:

  • EmailVerificationMessage

  • EmailVerificationSubject

  • InviteMessageTemplate.EmailMessage

  • InviteMessageTemplate.EmailSubject

  • VerificationMessageTemplate.EmailMessage

  • VerificationMessageTemplate.EmailMessageByLink

  • VerificationMessageTemplate.EmailSubject

  • VerificationMessageTemplate.EmailSubjectByLink

To use any of these parameters, you must set EmailSendingAccount to DEVELOPER.

DEVELOPER

When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.

If you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn parameter.

Before Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role, which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide.

" }, "From":{ "shape":"StringType", @@ -4536,7 +4580,7 @@ "documentation":"

The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:

  • Event publishing – Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.

  • IP pool management – When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.

" } }, - "documentation":"

The email configuration type.

" + "documentation":"

The email configuration type.

Amazon Cognito has specific regions for use with Amazon SES. For more information on the supported regions, see Email Settings for Amazon Cognito User Pools.

" }, "EmailNotificationBodyType":{ "type":"string", @@ -5388,6 +5432,18 @@ "UserMigration":{ "shape":"ArnType", "documentation":"

The user migration Lambda config type.

" + }, + "CustomSMSSender":{ + "shape":"CustomSMSLambdaVersionConfigType", + "documentation":"

A custom SMS sender AWS Lambda trigger.

" + }, + "CustomEmailSender":{ + "shape":"CustomEmailLambdaVersionConfigType", + "documentation":"

A custom email sender AWS Lambda trigger.

" + }, + "KMSKeyID":{ + "shape":"ArnType", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service (AWS KMS) customer master key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to CustomEmailSender and CustomSMSSender.

" } }, "documentation":"

Specifies the configuration for AWS Lambda triggers.

" @@ -5793,11 +5849,11 @@ }, "EmailMessage":{ "shape":"EmailVerificationMessageType", - "documentation":"

The message template for email messages.

" + "documentation":"

The message template for email messages. EmailMessage is allowed only if EmailSendingAccount is DEVELOPER.

" }, "EmailSubject":{ "shape":"EmailVerificationSubjectType", - "documentation":"

The subject line for email messages.

" + "documentation":"

The subject line for email messages. EmailSubject is allowed only if EmailSendingAccount is DEVELOPER.

" } }, "documentation":"

The message template structure.

" @@ -6386,14 +6442,14 @@ "members":{ "Enabled":{ "shape":"BooleanType", - "documentation":"

Specifies whether SMS text message MFA is enabled.

" + "documentation":"

Specifies whether SMS text message MFA is enabled. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted.

" }, "PreferredMfa":{ "shape":"BooleanType", "documentation":"

Specifies whether SMS is the preferred MFA method.

" } }, - "documentation":"

The type used for enabling SMS MFA at the user level.

" + "documentation":"

The type used for enabling SMS MFA at the user level. Phone numbers don't need to be verified to be used for SMS MFA. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.

" }, "SchemaAttributeType":{ "type":"structure", @@ -6718,7 +6774,7 @@ "members":{ "SnsCallerArn":{ "shape":"ArnType", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages. SMS messages are subject to a spending limit.

" }, "ExternalId":{ "shape":"StringType", @@ -6776,14 +6832,14 @@ "members":{ "Enabled":{ "shape":"BooleanType", - "documentation":"

Specifies whether software token MFA is enabled.

" + "documentation":"

Specifies whether software token MFA is enabled. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted.

" }, "PreferredMfa":{ "shape":"BooleanType", "documentation":"

Specifies whether software token MFA is the preferred MFA method.

" } }, - "documentation":"

The type used for enabling software token MFA at the user level.

" + "documentation":"

The type used for enabling software token MFA at the user level. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, disable MFA for users and turn on Adaptive Authentication for the user pool.

" }, "StartUserImportJobRequest":{ "type":"structure", @@ -8061,19 +8117,19 @@ }, "EmailMessage":{ "shape":"EmailVerificationMessageType", - "documentation":"

The email message template.

" + "documentation":"

The email message template. EmailMessage is allowed only if EmailSendingAccount is DEVELOPER.

" }, "EmailSubject":{ "shape":"EmailVerificationSubjectType", - "documentation":"

The subject line for the email message template.

" + "documentation":"

The subject line for the email message template. EmailSubject is allowed only if EmailSendingAccount is DEVELOPER.

" }, "EmailMessageByLink":{ "shape":"EmailVerificationMessageByLinkType", - "documentation":"

The email message template for sending a confirmation link to the user.

" + "documentation":"

The email message template for sending a confirmation link to the user. EmailMessageByLink is allowed only if EmailSendingAccount is DEVELOPER.

" }, "EmailSubjectByLink":{ "shape":"EmailVerificationSubjectByLinkType", - "documentation":"

The subject line for the email message template for sending a confirmation link to the user.

" + "documentation":"

The subject line for the email message template for sending a confirmation link to the user. EmailSubjectByLink is allowed only if EmailSendingAccount is DEVELOPER.

" }, "DefaultEmailOption":{ "shape":"DefaultEmailOptionType", diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index 39d08ce9ed23..14f91daf5ec0 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index 754d66463d50..337077bff7ca 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehend/src/main/resources/codegen-resources/paginators-1.json b/services/comprehend/src/main/resources/codegen-resources/paginators-1.json index 0a98c5b4e9fd..3c7889ffc025 100644 --- a/services/comprehend/src/main/resources/codegen-resources/paginators-1.json +++ b/services/comprehend/src/main/resources/codegen-resources/paginators-1.json @@ -25,6 +25,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListEventsDetectionJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListKeyPhrasesDetectionJobs": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/comprehend/src/main/resources/codegen-resources/service-2.json b/services/comprehend/src/main/resources/codegen-resources/service-2.json index 63fbd425d313..8b0597aa5117 100644 --- a/services/comprehend/src/main/resources/codegen-resources/service-2.json +++ b/services/comprehend/src/main/resources/codegen-resources/service-2.json @@ -322,6 +322,22 @@ ], "documentation":"

Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.

" }, + "DescribeEventsDetectionJob":{ + "name":"DescribeEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEventsDetectionJobRequest"}, + "output":{"shape":"DescribeEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the status and details of an events detection job.

" + }, "DescribeKeyPhrasesDetectionJob":{ "name":"DescribeKeyPhrasesDetectionJob", "http":{ @@ -577,6 +593,22 @@ ], "documentation":"

Gets a list of the properties of all entity recognizers that you created, including recognizers currently in training. Allows you to filter the list of recognizers based on criteria such as status and submission time. This call returns up to 500 entity recognizers in the list, with a default number of 100 recognizers in the list.

The results of this list are not in any particular order. Please get the list and sort locally if needed.

" }, + "ListEventsDetectionJobs":{ + "name":"ListEventsDetectionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEventsDetectionJobsRequest"}, + "output":{"shape":"ListEventsDetectionJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidFilterException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of the events detection jobs that you have submitted.

" + }, "ListKeyPhrasesDetectionJobs":{ "name":"ListKeyPhrasesDetectionJobs", "http":{ @@ -708,6 +740,22 @@ ], "documentation":"

Starts an asynchronous entity detection job for a collection of documents. Use the operation to track the status of a job.

This API can be used for either standard entity detection or custom entity recognition. In order to be used for custom entity recognition, the optional EntityRecognizerArn must be used in order to provide access to the recognizer being used to detect the custom entity.

" }, + "StartEventsDetectionJob":{ + "name":"StartEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartEventsDetectionJobRequest"}, + "output":{"shape":"StartEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"KmsKeyValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous event detection job for a collection of documents.

" + }, "StartKeyPhrasesDetectionJob":{ "name":"StartKeyPhrasesDetectionJob", "http":{ @@ -802,6 +850,21 @@ ], "documentation":"

Stops an entities detection job in progress.

If the job state is IN_PROGRESS the job is marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise the job is stopped and put into the STOPPED state.

If the job is in the COMPLETED or FAILED state when you call the StopDominantLanguageDetectionJob operation, the operation returns a 400 Internal Request Exception.

When a job is stopped, any documents already processed are written to the output location.

" }, + "StopEventsDetectionJob":{ + "name":"StopEventsDetectionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopEventsDetectionJobRequest"}, + "output":{"shape":"StopEventsDetectionJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"JobNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops an events detection job in progress.
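A short sketch of checking a job's status and then stopping it with the new Describe and Stop operations; the job identifier is assumed to come from a prior StartEventsDetectionJob call.

```java
import software.amazon.awssdk.services.comprehend.ComprehendClient;
import software.amazon.awssdk.services.comprehend.model.DescribeEventsDetectionJobRequest;
import software.amazon.awssdk.services.comprehend.model.EventsDetectionJobProperties;
import software.amazon.awssdk.services.comprehend.model.JobStatus;
import software.amazon.awssdk.services.comprehend.model.StopEventsDetectionJobRequest;

public class StopEventsDetectionJobSketch {
    public static void main(String[] args) {
        String jobId = args[0]; // identifier returned by StartEventsDetectionJob

        try (ComprehendClient comprehend = ComprehendClient.create()) {
            EventsDetectionJobProperties props = comprehend.describeEventsDetectionJob(
                            DescribeEventsDetectionJobRequest.builder().jobId(jobId).build())
                    .eventsDetectionJobProperties();

            // Only request a stop while the job is still running; completed jobs keep their output.
            if (props.jobStatus() == JobStatus.SUBMITTED || props.jobStatus() == JobStatus.IN_PROGRESS) {
                comprehend.stopEventsDetectionJob(
                        StopEventsDetectionJobRequest.builder().jobId(jobId).build());
            }
        }
    }
}
```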

" + }, "StopKeyPhrasesDetectionJob":{ "name":"StopKeyPhrasesDetectionJob", "http":{ @@ -1322,7 +1385,7 @@ }, "Labels":{ "shape":"ListOfLabels", - "documentation":"

The labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not multually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.

" + "documentation":"

The labels used in the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.

" } }, "sensitive":true @@ -1689,6 +1752,25 @@ } } }, + "DescribeEventsDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the events detection job.

" + } + } + }, + "DescribeEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "EventsDetectionJobProperties":{ + "shape":"EventsDetectionJobProperties", + "documentation":"

An object that contains the properties associated with an event detection job.

" + } + } + }, "DescribeKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -2292,7 +2374,7 @@ "documentation":"

Specifies a date after which the returned endpoint or endpoints were created.

" } }, - "documentation":"

The filter used to determine which endpoints are are returned. You can filter jobs on their name, model, status, or the date and time that they were created. You can only set one filter at a time.

" + "documentation":"

The filter used to determine which endpoints are returned. You can filter jobs on their name, model, status, or the date and time that they were created. You can only set one filter at a time.

" }, "EndpointProperties":{ "type":"structure", @@ -2716,7 +2798,7 @@ }, "F1Score":{ "shape":"Double", - "documentation":"

A measure of how accurate the recognizer results are for for a specific entity type in the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

" + "documentation":"

A measure of how accurate the recognizer results are for a specific entity type in the test data. It is derived from the Precision and Recall values. The F1Score is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.

" } }, "documentation":"

Detailed information about the accuracy of an entity recognizer for a specific entity type.

" @@ -2736,6 +2818,88 @@ }, "documentation":"

An entity type within a labeled training dataset that Amazon Comprehend uses to train a custom entity recognizer.

" }, + "EventTypeString":{ + "type":"string", + "max":40, + "min":1, + "pattern":"[A-Z_]*" + }, + "EventsDetectionJobFilter":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

Filters on the name of the events detection job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

Filters the list of jobs based on job status. Returns only jobs with the specified status.

" + }, + "SubmitTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" + }, + "SubmitTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" + } + }, + "documentation":"

Provides information for filtering a list of event detection jobs.

" + }, + "EventsDetectionJobProperties":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier assigned to the events detection job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name that you assigned to the events detection job.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The current status of the events detection job.

" + }, + "Message":{ + "shape":"AnyLengthString", + "documentation":"

A description of the status of the events detection job.

" + }, + "SubmitTime":{ + "shape":"Timestamp", + "documentation":"

The time that the events detection job was submitted for processing.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the events detection job completed.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data configuration that you supplied when you created the events detection job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data configuration that you supplied when you created the events detection job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the input documents.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + }, + "TargetEventTypes":{ + "shape":"TargetEventTypes", + "documentation":"

The types of events that are detected by the job.

" + } + }, + "documentation":"

Provides information about an events detection job.

" + }, + "EventsDetectionJobPropertiesList":{ + "type":"list", + "member":{"shape":"EventsDetectionJobProperties"} + }, "Float":{"type":"float"}, "IamRoleArn":{ "type":"string", @@ -3143,6 +3307,36 @@ } } }, + "ListEventsDetectionJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"EventsDetectionJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page.

" + } + } + }, + "ListEventsDetectionJobsResponse":{ + "type":"structure", + "members":{ + "EventsDetectionJobPropertiesList":{ + "shape":"EventsDetectionJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, "ListKeyPhrasesDetectionJobsRequest":{ "type":"structure", "members":{ @@ -3925,6 +4119,60 @@ } } }, + "StartEventsDetectionJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode", + "TargetEventTypes" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name of the events detection job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the input documents.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.

", + "idempotencyToken":true + }, + "TargetEventTypes":{ + "shape":"TargetEventTypes", + "documentation":"

The types of events to detect in the input documents.

" + } + } + }, + "StartEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The unique identifier generated for the job. To get the status of the job, use this identifier with the DescribeEventsDetectionJob operation.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the events detection job.

" + } + } + }, "StartKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":[ @@ -4199,6 +4447,29 @@ } } }, + "StopEventsDetectionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the events detection job to stop.

" + } + } + }, + "StopEventsDetectionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the events detection job to stop.

" + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

The status of the events detection job.

" + } + } + }, "StopKeyPhrasesDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -4406,6 +4677,11 @@ "max":256, "min":0 }, + "TargetEventTypes":{ + "type":"list", + "member":{"shape":"EventTypeString"}, + "min":1 + }, "TextSizeLimitExceededException":{ "type":"structure", "members":{ @@ -4587,7 +4863,7 @@ "documentation":"

The ID for each subnet being used in your private VPC. This subnet is a subset of the range of IPv4 addresses used by the VPC and is specific to a given availability zone in the VPC’s region. This ID number is preceded by \"subnet-\", for instance: \"subnet-04ccf456919e69055\". For more information, see VPCs and Subnets.

" } }, - "documentation":"

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for the job. For For more information, see Amazon VPC.

" + "documentation":"

Configuration parameters for an optional private Virtual Private Cloud (VPC) containing the resources you are using for the job. For more information, see Amazon VPC.

" } }, "documentation":"

Amazon Comprehend is an AWS service for gaining insight into the content of documents. Use these actions to determine the topics contained in your documents, the topics they discuss, the predominant sentiment expressed in them, the predominant language used, and more.

" diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 2272911b6600..018612c3342e 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index d473936afbde..3a823729bb9e 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index 8d1054b5318a..e3e53cfa93a2 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -93,6 +93,26 @@ ], "documentation":"

Returns Auto Scaling group recommendations.

AWS Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that meet a specific set of requirements. For more information, see the Supported resources and requirements in the AWS Compute Optimizer User Guide.

" }, + "GetEBSVolumeRecommendations":{ + "name":"GetEBSVolumeRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEBSVolumeRecommendationsRequest"}, + "output":{"shape":"GetEBSVolumeRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

AWS Compute Optimizer generates recommendations for Amazon EBS volumes that meet a specific set of requirements. For more information, see the Supported resources and requirements in the AWS Compute Optimizer User Guide.

" + }, "GetEC2InstanceRecommendations":{ "name":"GetEC2InstanceRecommendations", "http":{ @@ -149,7 +169,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is the master account of an organization, this action also confirms the enrollment status of member accounts within the organization.

" + "documentation":"

Returns the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is the management account of an organization, this action also confirms the enrollment status of member accounts within the organization.

" }, "GetRecommendationSummaries":{ "name":"GetRecommendationSummaries", @@ -186,7 +206,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a master account of an organization, this action can also be used to enroll member accounts within the organization.

" + "documentation":"

Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a management account of an organization, this action can also be used to enroll member accounts within the organization.

" } }, "shapes":{ @@ -249,7 +269,7 @@ }, "finding":{ "shape":"Finding", - "documentation":"

The finding classification for the Auto Scaling group.

Findings for Auto Scaling groups include:

  • NotOptimized —An Auto Scaling group is considered not optimized when AWS Compute Optimizer identifies a recommendation that can provide better performance for your workload.

  • Optimized —An Auto Scaling group is considered optimized when Compute Optimizer determines that the group is correctly provisioned to run your workload based on the chosen instance type. For optimized resources, Compute Optimizer might recommend a new generation instance type.

The values that are returned might be NOT_OPTIMIZED or OPTIMIZED.

" + "documentation":"

The finding classification for the Auto Scaling group.

Findings for Auto Scaling groups include:

  • NotOptimized —An Auto Scaling group is considered not optimized when AWS Compute Optimizer identifies a recommendation that can provide better performance for your workload.

  • Optimized —An Auto Scaling group is considered optimized when Compute Optimizer determines that the group is correctly provisioned to run your workload based on the chosen instance type. For optimized resources, Compute Optimizer might recommend a new generation instance type.

" }, "utilizationMetrics":{ "shape":"UtilizationMetrics", @@ -345,6 +365,66 @@ "DestinationBucket":{"type":"string"}, "DestinationKey":{"type":"string"}, "DestinationKeyPrefix":{"type":"string"}, + "EBSFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"EBSFilterName", + "documentation":"

The name of the filter.

Specify Finding to return recommendations with a specific finding classification (e.g., Optimized).

" + }, + "values":{ + "shape":"FilterValues", + "documentation":"

The value of the filter.

The valid values are Optimized, or NotOptimized.

" + } + }, + "documentation":"

Describes a filter that returns a more specific list of Amazon Elastic Block Store (Amazon EBS) volume recommendations.

This filter is used with the GetEBSVolumeRecommendations action.

" + }, + "EBSFilterName":{ + "type":"string", + "enum":["Finding"] + }, + "EBSFilters":{ + "type":"list", + "member":{"shape":"EBSFilter"} + }, + "EBSFinding":{ + "type":"string", + "enum":[ + "Optimized", + "NotOptimized" + ] + }, + "EBSMetricName":{ + "type":"string", + "enum":[ + "VolumeReadOpsPerSecond", + "VolumeWriteOpsPerSecond", + "VolumeReadBytesPerSecond", + "VolumeWriteBytesPerSecond" + ] + }, + "EBSUtilizationMetric":{ + "type":"structure", + "members":{ + "name":{ + "shape":"EBSMetricName", + "documentation":"

The name of the utilization metric.

The following utilization metrics are available:

  • VolumeReadOpsPerSecond - The completed read operations per second from the volume in a specified period of time.

    Unit: Count

  • VolumeWriteOpsPerSecond - The completed write operations per second to the volume in a specified period of time.

    Unit: Count

  • VolumeReadBytesPerSecond - The bytes read per second from the volume in a specified period of time.

    Unit: Bytes

  • VolumeWriteBytesPerSecond - The bytes written to the volume in a specified period of time.

    Unit: Bytes

" + }, + "statistic":{ + "shape":"MetricStatistic", + "documentation":"

The statistic of the utilization metric.

The following statistics are available:

  • Average - This is the value of Sum / SampleCount during the specified period, or the average value observed during the specified period.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

" + }, + "value":{ + "shape":"MetricValue", + "documentation":"

The value of the utilization metric.

" + } + }, + "documentation":"

Describes a utilization metric of an Amazon Elastic Block Store (Amazon EBS) volume.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

" + }, + "EBSUtilizationMetrics":{ + "type":"list", + "member":{"shape":"EBSUtilizationMetric"} + }, "ErrorMessage":{"type":"string"}, "ExportAutoScalingGroupRecommendationsRequest":{ "type":"structure", @@ -352,7 +432,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to export Auto Scaling group recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + "documentation":"

The IDs of the AWS accounts for which to export Auto Scaling group recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" }, "filters":{ "shape":"Filters", @@ -372,7 +452,7 @@ }, "includeMemberAccounts":{ "shape":"IncludeMemberAccounts", - "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the master account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

This parameter cannot be specified together with the account IDs parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" + "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the management account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

This parameter cannot be specified together with the account IDs parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" } } }, @@ -405,7 +485,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to export instance recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + "documentation":"

The IDs of the AWS accounts for which to export instance recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" }, "filters":{ "shape":"Filters", @@ -425,7 +505,7 @@ }, "includeMemberAccounts":{ "shape":"IncludeMemberAccounts", - "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the master account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" + "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the management account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" } } }, @@ -540,14 +620,14 @@ "members":{ "name":{ "shape":"FilterName", - "documentation":"

The name of the filter.

Specify Finding to return recommendations with a specific findings classification (e.g., Overprovisioned).

Specify RecommendationSourceType to return recommendations of a specific resource type (e.g., AutoScalingGroup).

" + "documentation":"

The name of the filter.

Specify Finding to return recommendations with a specific finding classification (e.g., Overprovisioned).

Specify RecommendationSourceType to return recommendations of a specific resource type (e.g., AutoScalingGroup).

" }, "values":{ "shape":"FilterValues", - "documentation":"

The value of the filter.

If you specify the name parameter as Finding, and you request recommendations for an instance, then the valid values are Underprovisioned, Overprovisioned, NotOptimized, or Optimized.

If you specify the name parameter as Finding, and you request recommendations for an Auto Scaling group, then the valid values are Optimized, or NotOptimized.

If you specify the name parameter as RecommendationSourceType, then the valid values are Ec2Instance, or AutoScalingGroup.

" + "documentation":"

The value of the filter.

The valid values for this parameter are as follows, depending on what you specify for the name parameter and the resource type that you wish to filter results for:

  • Specify Optimized or NotOptimized if you specified the name parameter as Finding and you want to filter results for Auto Scaling groups.

  • Specify Underprovisioned, Overprovisioned, or Optimized if you specified the name parameter as Finding and you want to filter results for EC2 instances.

  • Specify Ec2Instance or AutoScalingGroup if you specified the name parameter as RecommendationSourceType.

" } }, - "documentation":"

Describes a filter that returns a more specific list of recommendations.

" + "documentation":"

Describes a filter that returns a more specific list of recommendations.

This filter is used with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

" }, "FilterName":{ "type":"string", @@ -579,7 +659,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return Auto Scaling group recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return Auto Scaling group recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" }, "autoScalingGroupArns":{ "shape":"AutoScalingGroupArns", @@ -616,6 +696,48 @@ } } }, + "GetEBSVolumeRecommendationsRequest":{ + "type":"structure", + "members":{ + "volumeArns":{ + "shape":"VolumeArns", + "documentation":"

The Amazon Resource Names (ARNs) of the volumes for which to return recommendations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to advance to the next page of volume recommendations.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of volume recommendations to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" + }, + "filters":{ + "shape":"EBSFilters", + "documentation":"

An array of objects that describe a filter that returns a more specific list of volume recommendations.

" + }, + "accountIds":{ + "shape":"AccountIds", + "documentation":"

The IDs of the AWS accounts for which to return volume recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return volume recommendations.

Only one account ID can be specified per request.

" + } + } + }, + "GetEBSVolumeRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to advance to the next page of volume recommendations.

This value is null when there are no more pages of volume recommendations to return.

" + }, + "volumeRecommendations":{ + "shape":"VolumeRecommendations", + "documentation":"

An array of objects that describe volume recommendations.

" + }, + "errors":{ + "shape":"GetRecommendationErrors", + "documentation":"

An array of objects that describe errors of the request.

For example, an error is returned if you request recommendations for an unsupported volume.

" + } + } + }, "GetEC2InstanceRecommendationsRequest":{ "type":"structure", "members":{ @@ -637,7 +759,7 @@ }, "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return instance recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return instance recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return instance recommendations.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return instance recommendations.

Only one account ID can be specified per request.

" } } }, @@ -717,7 +839,7 @@ }, "memberAccountsEnrolled":{ "shape":"MemberAccountsEnrolled", - "documentation":"

Confirms the enrollment status of member accounts within the organization, if the account is a master account of an organization.

" + "documentation":"

Confirms the enrollment status of member accounts within the organization, if the account is a management account of an organization.

" } } }, @@ -748,7 +870,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The IDs of the AWS accounts for which to return recommendation summaries.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return recommendation summaries.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return recommendation summaries.

If your account is the management account of an organization, use this parameter to specify the member accounts for which you want to return recommendation summaries.

Only one account ID can be specified per request.

" }, "nextToken":{ "shape":"NextToken", @@ -802,7 +924,7 @@ }, "finding":{ "shape":"Finding", - "documentation":"

The finding classification for the instance.

Findings for instances include:

  • Underprovisioned —An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance.

  • Overprovisioned —An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost.

  • Optimized —An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and is not over provisioned. An optimized instance runs your workloads with optimal performance and infrastructure cost. For optimized resources, AWS Compute Optimizer might recommend a new generation instance type.

The values that are returned might be UNDER_PROVISIONED, OVER_PROVISIONED, or OPTIMIZED.

" + "documentation":"

The finding classification for the instance.

Findings for instances include:

  • Underprovisioned —An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance.

  • Overprovisioned —An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost.

  • Optimized —An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and is not over provisioned. An optimized instance runs your workloads with optimal performance and infrastructure cost. For optimized resources, AWS Compute Optimizer might recommend a new generation instance type.

" }, "utilizationMetrics":{ "shape":"UtilizationMetrics", @@ -881,7 +1003,7 @@ }, "values":{ "shape":"FilterValues", - "documentation":"

The value of the filter.

If you specify the name parameter as ResourceType, the valid values are Ec2Instance or AutoScalingGroup.

If you specify the name parameter as JobStatus, the valid values are Queued, InProgress, Complete, or Failed.

" + "documentation":"

The value of the filter.

The valid values for this parameter are as follows, depending on what you specify for the name parameter:

  • Specify Ec2Instance or AutoScalingGroup if you specified the name parameter as ResourceType. There is no filter for EBS volumes because volume recommendations cannot be exported at this time.

  • Specify Queued, InProgress, Complete, or Failed if you specified the name parameter as JobStatus.

" } }, "documentation":"

Describes a filter that returns a more specific list of recommendation export jobs.

This filter is used with the DescribeRecommendationExportJobs action.

" @@ -985,7 +1107,7 @@ "members":{ "name":{ "shape":"MetricName", - "documentation":"

The name of the projected utilization metric.

" + "documentation":"

The name of the projected utilization metric.

The following projected utilization metrics are returned:

  • Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.

    Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

    Units: Percent

  • Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.

    Units: Percent

    The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

" }, "timestamps":{ "shape":"Timestamps", @@ -996,7 +1118,7 @@ "documentation":"

The values of the projected utilization metrics.

" } }, - "documentation":"

Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance.

The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

" + "documentation":"

Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance. This represents the projected utilization of a recommendation option had you used that resource during the analyzed period.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

" }, "ProjectedMetrics":{ "type":"list", @@ -1068,7 +1190,8 @@ "type":"string", "enum":[ "Ec2Instance", - "AutoScalingGroup" + "AutoScalingGroup", + "EbsVolume" ] }, "RecommendationSources":{ @@ -1231,7 +1354,7 @@ }, "includeMemberAccounts":{ "shape":"IncludeMemberAccounts", - "documentation":"

Indicates whether to enroll member accounts of the organization if the your account is the master account of an organization.

" + "documentation":"

Indicates whether to enroll member accounts of the organization if your account is the management account of an organization.

" } } }, @@ -1253,23 +1376,128 @@ "members":{ "name":{ "shape":"MetricName", - "documentation":"

The name of the utilization metric.

The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

" + "documentation":"

The name of the utilization metric.

The following utilization metrics are available:

  • Cpu - The percentage of allocated EC2 compute units that are currently in use on the instance. This metric identifies the processing power required to run an application on the instance.

    Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

    Units: Percent

  • Memory - The percentage of memory that is currently in use on the instance. This metric identifies the amount of memory required to run an application on the instance.

    Units: Percent

    The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

  • EBS_READ_OPS_PER_SECOND - The completed read operations from all EBS volumes attached to the instance in a specified period of time.

    Unit: Count

  • EBS_WRITE_OPS_PER_SECOND - The completed write operations to all EBS volumes attached to the instance in a specified period of time.

    Unit: Count

  • EBS_READ_BYTES_PER_SECOND - The bytes read from all EBS volumes attached to the instance in a specified period of time.

    Unit: Bytes

  • EBS_WRITE_BYTES_PER_SECOND - The bytes written to all EBS volumes attached to the instance in a specified period of time.

    Unit: Bytes

" }, "statistic":{ "shape":"MetricStatistic", - "documentation":"

The statistic of the utilization metric.

" + "documentation":"

The statistic of the utilization metric.

The following statistics are available:

  • Average - This is the value of Sum / SampleCount during the specified period, or the average value observed during the specified period.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

" }, "value":{ "shape":"MetricValue", "documentation":"

The value of the utilization metric.

" } }, - "documentation":"

Describes a utilization metric of a resource, such as an Amazon EC2 instance.

" + "documentation":"

Describes a utilization metric of a resource, such as an Amazon EC2 instance.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

" }, "UtilizationMetrics":{ "type":"list", "member":{"shape":"UtilizationMetric"} - } + }, + "VolumeArn":{"type":"string"}, + "VolumeArns":{ + "type":"list", + "member":{"shape":"VolumeArn"} + }, + "VolumeBaselineIOPS":{"type":"integer"}, + "VolumeBaselineThroughput":{"type":"integer"}, + "VolumeBurstIOPS":{"type":"integer"}, + "VolumeBurstThroughput":{"type":"integer"}, + "VolumeConfiguration":{ + "type":"structure", + "members":{ + "volumeType":{ + "shape":"VolumeType", + "documentation":"

The volume type.

This can be gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

" + }, + "volumeSize":{ + "shape":"VolumeSize", + "documentation":"

The size of the volume, in GiB.

" + }, + "volumeBaselineIOPS":{ + "shape":"VolumeBaselineIOPS", + "documentation":"

The baseline IOPS of the volume.

" + }, + "volumeBurstIOPS":{ + "shape":"VolumeBurstIOPS", + "documentation":"

The burst IOPS of the volume.

" + }, + "volumeBaselineThroughput":{ + "shape":"VolumeBaselineThroughput", + "documentation":"

The baseline throughput of the volume.

" + }, + "volumeBurstThroughput":{ + "shape":"VolumeBurstThroughput", + "documentation":"

The burst throughput of the volume.

" + } + }, + "documentation":"

Describes the configuration of an Amazon Elastic Block Store (Amazon EBS) volume.

" + }, + "VolumeRecommendation":{ + "type":"structure", + "members":{ + "volumeArn":{ + "shape":"VolumeArn", + "documentation":"

The Amazon Resource Name (ARN) of the current volume.

" + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the volume.

" + }, + "currentConfiguration":{ + "shape":"VolumeConfiguration", + "documentation":"

An object that describes the current configuration of the volume.

" + }, + "finding":{ + "shape":"EBSFinding", + "documentation":"

The finding classification for the volume.

Findings for volumes include:

  • NotOptimized —A volume is considered not optimized when AWS Compute Optimizer identifies a recommendation that can provide better performance for your workload.

  • Optimized —A volume is considered optimized when Compute Optimizer determines that the volume is correctly provisioned to run your workload based on the chosen volume type. For optimized resources, Compute Optimizer might recommend a new generation volume type.

" + }, + "utilizationMetrics":{ + "shape":"EBSUtilizationMetrics", + "documentation":"

An array of objects that describe the utilization metrics of the volume.

" + }, + "lookBackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

The number of days for which utilization metrics were analyzed for the volume.

" + }, + "volumeRecommendationOptions":{ + "shape":"VolumeRecommendationOptions", + "documentation":"

An array of objects that describe the recommendation options for the volume.

" + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

The time stamp of when the volume recommendation was last refreshed.

" + } + }, + "documentation":"

Describes an Amazon Elastic Block Store (Amazon EBS) volume recommendation.

" + }, + "VolumeRecommendationOption":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"VolumeConfiguration", + "documentation":"

An object that describes a volume configuration.

" + }, + "performanceRisk":{ + "shape":"PerformanceRisk", + "documentation":"

The performance risk of the volume recommendation option.

Performance risk is the likelihood of the recommended volume type not meeting the performance requirement of your workload.

The lowest performance risk is categorized as 0, and the highest as 5.

" + }, + "rank":{ + "shape":"Rank", + "documentation":"

The rank of the volume recommendation option.

The top recommendation option is ranked as 1.

" + } + }, + "documentation":"

Describes a recommendation option for an Amazon Elastic Block Store (Amazon EBS) volume.

" + }, + "VolumeRecommendationOptions":{ + "type":"list", + "member":{"shape":"VolumeRecommendationOption"} + }, + "VolumeRecommendations":{ + "type":"list", + "member":{"shape":"VolumeRecommendation"} + }, + "VolumeSize":{"type":"integer"}, + "VolumeType":{"type":"string"} }, - "documentation":"

AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS resources, such as EC2 instances and Auto Scaling groups. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, including the required permissions to use the service, see the AWS Compute Optimizer User Guide.

" + "documentation":"

AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS compute resources, such as EC2 instances, Auto Scaling groups, and Amazon EBS volumes. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, including the required permissions to use the service, see the AWS Compute Optimizer User Guide.

" } diff --git a/services/config/pom.xml b/services/config/pom.xml index 323583b86d19..edb6b8d219b7 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/connect/pom.xml b/services/connect/pom.xml index da02f7d810d6..cc6c1f7c7f94 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/paginators-1.json b/services/connect/src/main/resources/codegen-resources/paginators-1.json index 1005c52f0bd3..7a29e91eaa1c 100644 --- a/services/connect/src/main/resources/codegen-resources/paginators-1.json +++ b/services/connect/src/main/resources/codegen-resources/paginators-1.json @@ -10,6 +10,12 @@ "limit_key": "MaxResults", "output_token": "NextToken" }, + "ListApprovedOrigins": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Origins" + }, "ListContactFlows": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -22,6 +28,42 @@ "output_token": "NextToken", "result_key": "HoursOfOperationSummaryList" }, + "ListInstanceAttributes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Attributes" + }, + "ListInstanceStorageConfigs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "StorageConfigs" + }, + "ListInstances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "InstanceSummaryList" + }, + "ListIntegrationAssociations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "IntegrationAssociationSummaryList" + }, + "ListLambdaFunctions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LambdaFunctions" + }, + "ListLexBots": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "LexBots" + }, "ListPhoneNumbers": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -52,12 +94,24 @@ "output_token": "NextToken", "result_key": "RoutingProfileSummaryList" }, + "ListSecurityKeys": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SecurityKeys" + }, "ListSecurityProfiles": { "input_token": "NextToken", "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "SecurityProfileSummaryList" }, + "ListUseCases": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "UseCaseSummaryList" + }, "ListUserHierarchyGroups": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index 6327578e17fd..de1386960a39 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,78 @@ "uid":"connect-2017-08-08" }, "operations":{ + "AssociateApprovedOrigin":{ + "name":"AssociateApprovedOrigin", + "http":{ + "method":"PUT", + 
"requestUri":"/instance/{InstanceId}/approved-origin" + }, + "input":{"shape":"AssociateApprovedOriginRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Associates an approved origin to an Amazon Connect instance.

" + }, + "AssociateInstanceStorageConfig":{ + "name":"AssociateInstanceStorageConfig", + "http":{ + "method":"PUT", + "requestUri":"/instance/{InstanceId}/storage-config" + }, + "input":{"shape":"AssociateInstanceStorageConfigRequest"}, + "output":{"shape":"AssociateInstanceStorageConfigResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Associates a storage resource type for the first time. You can only associate one type of storage configuration in a single call. This means, for example, that you can't define an instance with multiple S3 buckets for storing chat transcripts.

This API does not create a resource that doesn't exist; it only associates an existing resource with the instance. Ensure that the resource specified in the storage configuration, such as an Amazon S3 bucket, exists before associating it.

" + }, + "AssociateLambdaFunction":{ + "name":"AssociateLambdaFunction", + "http":{ + "method":"PUT", + "requestUri":"/instance/{InstanceId}/lambda-function" + }, + "input":{"shape":"AssociateLambdaFunctionRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Allows the specified Amazon Connect instance to access the specified Lambda function.
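A hedged sketch of the corresponding Java SDK v2 call. The functionArn member name is an assumption based on the operation's purpose (the request shape's members are not part of this hunk), and both the ARN and the instance ID are placeholders.

```java
import software.amazon.awssdk.services.connect.ConnectClient;
import software.amazon.awssdk.services.connect.model.AssociateLambdaFunctionRequest;

public class AssociateLambdaFunctionSketch {
    public static void main(String[] args) {
        try (ConnectClient connect = ConnectClient.create()) {
            connect.associateLambdaFunction(AssociateLambdaFunctionRequest.builder()
                    .instanceId("12345678-1234-1234-1234-123456789012")  // placeholder instance ID
                    // Assumed member name for the Lambda function being allow-listed:
                    .functionArn("arn:aws:lambda:us-east-1:123456789012:function:MyContactFlowFunction")
                    .build());
        }
    }
}
```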

" + }, + "AssociateLexBot":{ + "name":"AssociateLexBot", + "http":{ + "method":"PUT", + "requestUri":"/instance/{InstanceId}/lex-bot" + }, + "input":{"shape":"AssociateLexBotRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Allows the specified Amazon Connect instance to access the specified Amazon Lex bot.

" + }, "AssociateRoutingProfileQueues":{ "name":"AssociateRoutingProfileQueues", "http":{ @@ -29,6 +101,25 @@ ], "documentation":"

Associates a set of queues with a routing profile.

" }, + "AssociateSecurityKey":{ + "name":"AssociateSecurityKey", + "http":{ + "method":"PUT", + "requestUri":"/instance/{InstanceId}/security-key" + }, + "input":{"shape":"AssociateSecurityKeyRequest"}, + "output":{"shape":"AssociateSecurityKeyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Associates a security key to the instance.

" + }, "CreateContactFlow":{ "name":"CreateContactFlow", "http":{ @@ -49,6 +140,40 @@ ], "documentation":"

Creates a contact flow for the specified Amazon Connect instance.

You can also create and update contact flows using the Amazon Connect Flow language.

" }, + "CreateInstance":{ + "name":"CreateInstance", + "http":{ + "method":"PUT", + "requestUri":"/instance" + }, + "input":{"shape":"CreateInstanceRequest"}, + "output":{"shape":"CreateInstanceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage (such as Amazon S3 or Kinesis) or allow for any configuration of features such as Contact Lens for Amazon Connect.

" + }, + "CreateIntegrationAssociation":{ + "name":"CreateIntegrationAssociation", + "http":{ + "method":"PUT", + "requestUri":"/instance/{InstanceId}/integration-associations" + }, + "input":{"shape":"CreateIntegrationAssociationRequest"}, + "output":{"shape":"CreateIntegrationAssociationResponse"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Creates an AppIntegration association with an Amazon Connect instance.

" + }, "CreateRoutingProfile":{ "name":"CreateRoutingProfile", "http":{ @@ -68,6 +193,23 @@ ], "documentation":"

Creates a new routing profile.

" }, + "CreateUseCase":{ + "name":"CreateUseCase", + "http":{ + "method":"PUT", + "requestUri":"/instance/{InstanceId}/integration-associations/{IntegrationAssociationId}/use-cases" + }, + "input":{"shape":"CreateUseCaseRequest"}, + "output":{"shape":"CreateUseCaseResponse"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Creates a use case for an AppIntegration association.

" + }, "CreateUser":{ "name":"CreateUser", "http":{ @@ -87,6 +229,69 @@ ], "documentation":"

Creates a user account for the specified Amazon Connect instance.

For information about how to create user accounts using the Amazon Connect console, see Add Users in the Amazon Connect Administrator Guide.

" }, + "CreateUserHierarchyGroup":{ + "name":"CreateUserHierarchyGroup", + "http":{ + "method":"PUT", + "requestUri":"/user-hierarchy-groups/{InstanceId}" + }, + "input":{"shape":"CreateUserHierarchyGroupRequest"}, + "output":{"shape":"CreateUserHierarchyGroupResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates a new user hierarchy group.

" + }, + "DeleteInstance":{ + "name":"DeleteInstance", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}" + }, + "input":{"shape":"DeleteInstanceRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes the Amazon Connect instance.

" + }, + "DeleteIntegrationAssociation":{ + "name":"DeleteIntegrationAssociation", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}/integration-associations/{IntegrationAssociationId}" + }, + "input":{"shape":"DeleteIntegrationAssociationRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes an AppIntegration association from an Amazon Connect instance. The association must not have any use cases associated with it.

" + }, + "DeleteUseCase":{ + "name":"DeleteUseCase", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}/integration-associations/{IntegrationAssociationId}/use-cases/{UseCaseId}" + }, + "input":{"shape":"DeleteUseCaseRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes a use case from an AppIntegration association.

" + }, "DeleteUser":{ "name":"DeleteUser", "http":{ @@ -103,6 +308,23 @@ ], "documentation":"

Deletes a user account from the specified Amazon Connect instance.

For information about what happens to a user's data when their account is deleted, see Delete Users from Your Amazon Connect Instance in the Amazon Connect Administrator Guide.

" }, + "DeleteUserHierarchyGroup":{ + "name":"DeleteUserHierarchyGroup", + "http":{ + "method":"DELETE", + "requestUri":"/user-hierarchy-groups/{InstanceId}/{HierarchyGroupId}" + }, + "input":{"shape":"DeleteUserHierarchyGroupRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Deletes an existing user hierarchy group. It must not be associated with any agents or have any active child groups.

" + }, "DescribeContactFlow":{ "name":"DescribeContactFlow", "http":{ @@ -121,6 +343,55 @@ ], "documentation":"

Describes the specified contact flow.

You can also create and update contact flows using the Amazon Connect Flow language.

" }, + "DescribeInstance":{ + "name":"DescribeInstance", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}" + }, + "input":{"shape":"DescribeInstanceRequest"}, + "output":{"shape":"DescribeInstanceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns the current state of the specified instance identifier. It tracks the instance while it is being created and returns an error status if applicable.

If an instance is not created successfully, the instance status reason field returns details relevant to the failure. An instance in a failed state is returned only for 24 hours after the CreateInstance API was invoked.

" + }, + "DescribeInstanceAttribute":{ + "name":"DescribeInstanceAttribute", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/attribute/{AttributeType}" + }, + "input":{"shape":"DescribeInstanceAttributeRequest"}, + "output":{"shape":"DescribeInstanceAttributeResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes the specified instance attribute.

" + }, + "DescribeInstanceStorageConfig":{ + "name":"DescribeInstanceStorageConfig", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/storage-config/{AssociationId}" + }, + "input":{"shape":"DescribeInstanceStorageConfigRequest"}, + "output":{"shape":"DescribeInstanceStorageConfigResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves the current storage configurations for the specified resource type, association ID, and instance ID.

" + }, "DescribeRoutingProfile":{ "name":"DescribeRoutingProfile", "http":{ @@ -189,6 +460,70 @@ ], "documentation":"

Describes the hierarchy structure of the specified Amazon Connect instance.

" }, + "DisassociateApprovedOrigin":{ + "name":"DisassociateApprovedOrigin", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}/approved-origin" + }, + "input":{"shape":"DisassociateApprovedOriginRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Revokes access to integrated applications from Amazon Connect.

" + }, + "DisassociateInstanceStorageConfig":{ + "name":"DisassociateInstanceStorageConfig", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}/storage-config/{AssociationId}" + }, + "input":{"shape":"DisassociateInstanceStorageConfigRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Removes the storage type configurations for the specified resource type and association ID.

" + }, + "DisassociateLambdaFunction":{ + "name":"DisassociateLambdaFunction", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}/lambda-function" + }, + "input":{"shape":"DisassociateLambdaFunctionRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Removes the Lambda function from the drop-down options available in the relevant contact flow blocks.

" + }, + "DisassociateLexBot":{ + "name":"DisassociateLexBot", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}/lex-bot" + }, + "input":{"shape":"DisassociateLexBotRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Revokes authorization from the specified instance to access the specified Amazon Lex bot.

" + }, "DisassociateRoutingProfileQueues":{ "name":"DisassociateRoutingProfileQueues", "http":{ @@ -205,6 +540,22 @@ ], "documentation":"

Disassociates a set of queues from a routing profile.

" }, + "DisassociateSecurityKey":{ + "name":"DisassociateSecurityKey", + "http":{ + "method":"DELETE", + "requestUri":"/instance/{InstanceId}/security-key/{AssociationId}" + }, + "input":{"shape":"DisassociateSecurityKeyRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes the specified security key.

" + }, "GetContactAttributes":{ "name":"GetContactAttributes", "http":{ @@ -272,6 +623,23 @@ ], "documentation":"

Gets historical metric data from the specified Amazon Connect instance.

For a description of each historical metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide.

" }, + "ListApprovedOrigins":{ + "name":"ListApprovedOrigins", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/approved-origins" + }, + "input":{"shape":"ListApprovedOriginsRequest"}, + "output":{"shape":"ListApprovedOriginsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a paginated list of all approved origins associated with the instance.

" + }, "ListContactFlows":{ "name":"ListContactFlows", "http":{ @@ -306,31 +674,129 @@ ], "documentation":"

Provides information about the hours of operation for the specified Amazon Connect instance.

For more information about hours of operation, see Set the Hours of Operation for a Queue in the Amazon Connect Administrator Guide.

" }, - "ListPhoneNumbers":{ - "name":"ListPhoneNumbers", + "ListInstanceAttributes":{ + "name":"ListInstanceAttributes", "http":{ "method":"GET", - "requestUri":"/phone-numbers-summary/{InstanceId}" + "requestUri":"/instance/{InstanceId}/attributes" }, - "input":{"shape":"ListPhoneNumbersRequest"}, - "output":{"shape":"ListPhoneNumbersResponse"}, + "input":{"shape":"ListInstanceAttributesRequest"}, + "output":{"shape":"ListInstanceAttributesResponse"}, "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, - {"shape":"InternalServiceException"} + {"shape":"ThrottlingException"} ], - "documentation":"

Provides information about the phone numbers for the specified Amazon Connect instance.

For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the Amazon Connect Administrator Guide.

" + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Returns a paginated list of all attribute types for the given instance.

" }, - "ListPrompts":{ - "name":"ListPrompts", + "ListInstanceStorageConfigs":{ + "name":"ListInstanceStorageConfigs", "http":{ "method":"GET", - "requestUri":"/prompts-summary/{InstanceId}" + "requestUri":"/instance/{InstanceId}/storage-configs" }, - "input":{"shape":"ListPromptsRequest"}, - "output":{"shape":"ListPromptsResponse"}, + "input":{"shape":"ListInstanceStorageConfigsRequest"}, + "output":{"shape":"ListInstanceStorageConfigsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Returns a paginated list of storage configs for the identified instance and resource type.

" + }, + "ListInstances":{ + "name":"ListInstances", + "http":{ + "method":"GET", + "requestUri":"/instance" + }, + "input":{"shape":"ListInstancesRequest"}, + "output":{"shape":"ListInstancesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Returns a list of instances that are in the active, creation-in-progress, or failed state. Instances that aren't created successfully (they are in a failed state) are returned only for 24 hours after the CreateInstance API was invoked.

" + }, + "ListIntegrationAssociations":{ + "name":"ListIntegrationAssociations", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/integration-associations" + }, + "input":{"shape":"ListIntegrationAssociationsRequest"}, + "output":{"shape":"ListIntegrationAssociationsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Provides summary information about the AppIntegration associations for the specified Amazon Connect instance.

" + }, + "ListLambdaFunctions":{ + "name":"ListLambdaFunctions", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/lambda-functions" + }, + "input":{"shape":"ListLambdaFunctionsRequest"}, + "output":{"shape":"ListLambdaFunctionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a paginated list of all the Lambda functions that show up in the drop-down options in the relevant contact flow blocks.

" + }, + "ListLexBots":{ + "name":"ListLexBots", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/lex-bots" + }, + "input":{"shape":"ListLexBotsRequest"}, + "output":{"shape":"ListLexBotsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a paginated list of all the Amazon Lex bots currently associated with the instance.

" + }, + "ListPhoneNumbers":{ + "name":"ListPhoneNumbers", + "http":{ + "method":"GET", + "requestUri":"/phone-numbers-summary/{InstanceId}" + }, + "input":{"shape":"ListPhoneNumbersRequest"}, + "output":{"shape":"ListPhoneNumbersResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Provides information about the phone numbers for the specified Amazon Connect instance.

For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the Amazon Connect Administrator Guide.

" + }, + "ListPrompts":{ + "name":"ListPrompts", + "http":{ + "method":"GET", + "requestUri":"/prompts-summary/{InstanceId}" + }, + "input":{"shape":"ListPromptsRequest"}, + "output":{"shape":"ListPromptsResponse"}, "errors":[ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"}, @@ -391,6 +857,23 @@ ], "documentation":"

Provides summary information about the routing profiles for the specified Amazon Connect instance.

For more information about routing profiles, see Routing Profiles and Create a Routing Profile in the Amazon Connect Administrator Guide.

" }, + "ListSecurityKeys":{ + "name":"ListSecurityKeys", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/security-keys" + }, + "input":{"shape":"ListSecurityKeysRequest"}, + "output":{"shape":"ListSecurityKeysResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a paginated list of all security keys associated with the instance.

" + }, "ListSecurityProfiles":{ "name":"ListSecurityProfiles", "http":{ @@ -425,6 +908,22 @@ ], "documentation":"

Lists the tags for the specified resource.

For sample policies that use tags, see Amazon Connect Identity-Based Policy Examples in the Amazon Connect Administrator Guide.

" }, + "ListUseCases":{ + "name":"ListUseCases", + "http":{ + "method":"GET", + "requestUri":"/instance/{InstanceId}/integration-associations/{IntegrationAssociationId}/use-cases" + }, + "input":{"shape":"ListUseCasesRequest"}, + "output":{"shape":"ListUseCasesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Lists the use cases for the specified AppIntegration association.

" + }, "ListUserHierarchyGroups":{ "name":"ListUserHierarchyGroups", "http":{ @@ -526,6 +1025,24 @@ ], "documentation":"

This API places an outbound call to a contact, and then initiates the contact flow. It performs the actions in the contact flow that's specified (in ContactFlowId).

Agents are not involved in initiating the outbound API (that is, dialing the contact). If the contact flow places an outbound call to a contact, and then puts the contact in queue, that's when the call is routed to the agent, like any other inbound case.

There is a 60 second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails.

UK numbers with a 447 prefix are not allowed by default. Before you can dial these UK mobile numbers, you must submit a service quota increase request. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

" }, + "StartTaskContact":{ + "name":"StartTaskContact", + "http":{ + "method":"PUT", + "requestUri":"/contact/task" + }, + "input":{"shape":"StartTaskContactRequest"}, + "output":{"shape":"StartTaskContactResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Initiates a contact flow to start a new task.

" + }, "StopContact":{ "name":"StopContact", "http":{ @@ -619,7 +1136,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates the contact attributes associated with the specified contact.

You can add or update attributes for both ongoing and completed contacts. For example, you can update the customer's name or the reason the customer called while the call is active, or add notes about steps that the agent took during the call that are displayed to the next agent that takes the call. You can also update attributes for a contact using data from your CRM application and save the data with the contact in Amazon Connect. You could also flag calls for additional analysis, such as legal review or identifying abusive callers.

Contact attributes are available in Amazon Connect for 24 months, and are then deleted.

This operation is also available in the Amazon Connect Flow language. See UpdateContactAttributes.

Important: You cannot use the operation to update attributes for contacts that occurred prior to the release of the API, September 12, 2018. You can update attributes only for contacts that started after the release of the API. If you attempt to update attributes for a contact that occurred prior to the release of the API, a 400 error is returned. This applies also to queued callbacks that were initiated prior to the release of the API but are still active in your instance.

" + "documentation":"

Creates or updates the contact attributes associated with the specified contact.

You can add or update attributes for both ongoing and completed contacts. For example, you can update the customer's name or the reason the customer called while the call is active, or add notes about steps that the agent took during the call that are displayed to the next agent that takes the call. You can also update attributes for a contact using data from your CRM application and save the data with the contact in Amazon Connect. You could also flag calls for additional analysis, such as legal review or identifying abusive callers.

Contact attributes are available in Amazon Connect for 24 months, and are then deleted.

Important: You cannot use the operation to update attributes for contacts that occurred prior to the release of the API, September 12, 2018. You can update attributes only for contacts that started after the release of the API. If you attempt to update attributes for a contact that occurred prior to the release of the API, a 400 error is returned. This applies also to queued callbacks that were initiated prior to the release of the API but are still active in your instance.

" }, "UpdateContactFlowContent":{ "name":"UpdateContactFlowContent", @@ -653,7 +1170,39 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

The name of the contact flow.

" + "documentation":"

Updates the name of the contact flow.

You can also create and update contact flows using the Amazon Connect Flow language.

" + }, + "UpdateInstanceAttribute":{ + "name":"UpdateInstanceAttribute", + "http":{ + "method":"POST", + "requestUri":"/instance/{InstanceId}/attribute/{AttributeType}" + }, + "input":{"shape":"UpdateInstanceAttributeRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates the value for the specified attribute type.

" + }, + "UpdateInstanceStorageConfig":{ + "name":"UpdateInstanceStorageConfig", + "http":{ + "method":"POST", + "requestUri":"/instance/{InstanceId}/storage-config/{AssociationId}" + }, + "input":{"shape":"UpdateInstanceStorageConfigRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates an existing configuration for a resource type. This API is idempotent.

" }, "UpdateRoutingProfileConcurrency":{ "name":"UpdateRoutingProfileConcurrency", @@ -736,6 +1285,40 @@ ], "documentation":"

Assigns the specified hierarchy group to the specified user.

" }, + "UpdateUserHierarchyGroupName":{ + "name":"UpdateUserHierarchyGroupName", + "http":{ + "method":"POST", + "requestUri":"/user-hierarchy-groups/{InstanceId}/{HierarchyGroupId}/name" + }, + "input":{"shape":"UpdateUserHierarchyGroupNameRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Updates the name of the user hierarchy group.

" + }, + "UpdateUserHierarchyStructure":{ + "name":"UpdateUserHierarchyStructure", + "http":{ + "method":"POST", + "requestUri":"/user-hierarchy-structure/{InstanceId}" + }, + "input":{"shape":"UpdateUserHierarchyStructureRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Updates the user hierarchy structure: add, remove, and rename user hierarchy levels.

" + }, "UpdateUserIdentityInfo":{ "name":"UpdateUserIdentityInfo", "http":{ @@ -822,6 +1405,96 @@ "max":100, "min":1 }, + "AssociateApprovedOriginRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Origin" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "Origin":{ + "shape":"Origin", + "documentation":"

The domain to add to your allow list.

" + } + } + }, + "AssociateInstanceStorageConfigRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ResourceType", + "StorageConfig" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "ResourceType":{ + "shape":"InstanceStorageResourceType", + "documentation":"

A valid resource type.

" + }, + "StorageConfig":{ + "shape":"InstanceStorageConfig", + "documentation":"

A valid storage type.

" + } + } + }, + "AssociateInstanceStorageConfigResponse":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

" + } + } + }, + "AssociateLambdaFunctionRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "FunctionArn" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "FunctionArn":{ + "shape":"FunctionArn", + "documentation":"

The Amazon Resource Name (ARN) for the Lambda function being associated. Maximum number of characters allowed is 140.

" + } + } + }, + "AssociateLexBotRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "LexBot" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "LexBot":{ + "shape":"LexBot", + "documentation":"

The Amazon Lex bot to associate with the instance.
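As an illustration of how this request shape is used, here is a sketch of associating a bot through the generated Java v2 client, assuming the matching AssociateLexBot operation produced from this model; the instance ID, bot name, and Region are placeholders.

    import software.amazon.awssdk.services.connect.ConnectClient;
    import software.amazon.awssdk.services.connect.model.AssociateLexBotRequest;
    import software.amazon.awssdk.services.connect.model.LexBot;

    public class LexBotSketch {
        public static void main(String[] args) {
            try (ConnectClient connect = ConnectClient.create()) {
                connect.associateLexBot(AssociateLexBotRequest.builder()
                        .instanceId("11111111-2222-3333-4444-555555555555") // placeholder instance ID
                        .lexBot(LexBot.builder()
                                .name("OrderFlowersBot")   // placeholder bot name, 50 characters max
                                .lexRegion("us-west-2")    // Region in which the bot was created
                                .build())
                        .build());
            }
        }
    }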

" + } + } + }, "AssociateRoutingProfileQueuesRequest":{ "type":"structure", "required":[ @@ -848,6 +1521,53 @@ } } }, + "AssociateSecurityKeyRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Key" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "Key":{ + "shape":"PEM", + "documentation":"

A valid security key in PEM format.

" + } + } + }, + "AssociateSecurityKeyResponse":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

" + } + } + }, + "AssociationId":{ + "type":"string", + "max":100, + "min":1 + }, + "Attribute":{ + "type":"structure", + "members":{ + "AttributeType":{ + "shape":"InstanceAttributeType", + "documentation":"

The type of attribute.

" + }, + "Value":{ + "shape":"InstanceAttributeValue", + "documentation":"

The value of the attribute.

" + } + }, + "documentation":"

A toggle for an individual feature at the instance level.

" + }, "AttributeName":{ "type":"string", "max":32767, @@ -863,18 +1583,32 @@ "key":{"shape":"AttributeName"}, "value":{"shape":"AttributeValue"} }, + "AttributesList":{ + "type":"list", + "member":{"shape":"Attribute"} + }, "AutoAccept":{"type":"boolean"}, + "BotName":{ + "type":"string", + "max":50 + }, + "BucketName":{ + "type":"string", + "max":128, + "min":1 + }, "Channel":{ "type":"string", "enum":[ "VOICE", - "CHAT" + "CHAT", + "TASK" ] }, "Channels":{ "type":"list", "member":{"shape":"Channel"}, - "max":1 + "max":3 }, "ChatContent":{ "type":"string", @@ -1032,6 +1766,11 @@ "error":{"httpStatusCode":410}, "exception":true }, + "ContactReferences":{ + "type":"map", + "key":{"shape":"ReferenceKey"}, + "value":{"shape":"Reference"} + }, "CreateContactFlowRequest":{ "type":"structure", "required":[ @@ -1082,13 +1821,112 @@ } } }, - "CreateRoutingProfileRequest":{ + "CreateInstanceRequest":{ "type":"structure", "required":[ - "InstanceId", - "Name", - "Description", - "DefaultOutboundQueueId", + "IdentityManagementType", + "InboundCallsEnabled", + "OutboundCallsEnabled" + ], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

The idempotency token.

" + }, + "IdentityManagementType":{ + "shape":"DirectoryType", + "documentation":"

The type of identity management for your Amazon Connect users.

" + }, + "InstanceAlias":{ + "shape":"DirectoryAlias", + "documentation":"

The name for your instance.

" + }, + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier for the directory.

" + }, + "InboundCallsEnabled":{ + "shape":"InboundCallsEnabled", + "documentation":"

Whether your contact center handles incoming contacts.

" + }, + "OutboundCallsEnabled":{ + "shape":"OutboundCallsEnabled", + "documentation":"

Whether your contact center allows outbound calls.

" + } + } + }, + "CreateInstanceResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InstanceId", + "documentation":"

The identifier for the instance.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the instance.

" + } + } + }, + "CreateIntegrationAssociationRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "IntegrationType", + "IntegrationArn", + "SourceApplicationUrl", + "SourceApplicationName", + "SourceType" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "IntegrationType":{ + "shape":"IntegrationType", + "documentation":"

The type of information to be ingested.

" + }, + "IntegrationArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the integration.

" + }, + "SourceApplicationUrl":{ + "shape":"URI", + "documentation":"

The URL for the external application.

" + }, + "SourceApplicationName":{ + "shape":"SourceApplicationName", + "documentation":"

The name of the external application.

" + }, + "SourceType":{ + "shape":"SourceType", + "documentation":"

The type of the data source.

" + } + } + }, + "CreateIntegrationAssociationResponse":{ + "type":"structure", + "members":{ + "IntegrationAssociationId":{ + "shape":"IntegrationAssociationId", + "documentation":"

The identifier for the association.

" + }, + "IntegrationAssociationArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the association.

" + } + } + }, + "CreateRoutingProfileRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "Name", + "Description", + "DefaultOutboundQueueId", "MediaConcurrencies" ], "members":{ @@ -1137,6 +1975,81 @@ } } }, + "CreateUseCaseRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "IntegrationAssociationId", + "UseCaseType" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "IntegrationAssociationId":{ + "shape":"IntegrationAssociationId", + "documentation":"

The identifier for the AppIntegration association.

", + "location":"uri", + "locationName":"IntegrationAssociationId" + }, + "UseCaseType":{ + "shape":"UseCaseType", + "documentation":"

The type of use case to associate to the AppIntegration association. Each AppIntegration association can have only one of each use case type.

" + } + } + }, + "CreateUseCaseResponse":{ + "type":"structure", + "members":{ + "UseCaseId":{ + "shape":"UseCaseId", + "documentation":"

The identifier of the use case.

" + }, + "UseCaseArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the use case.

" + } + } + }, + "CreateUserHierarchyGroupRequest":{ + "type":"structure", + "required":[ + "Name", + "InstanceId" + ], + "members":{ + "Name":{ + "shape":"HierarchyGroupName", + "documentation":"

The name of the user hierarchy group. Must not be more than 100 characters.

" + }, + "ParentGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier for the parent hierarchy group. The user hierarchy is created at level one if the parent group ID is null.

" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "CreateUserHierarchyGroupResponse":{ + "type":"structure", + "members":{ + "HierarchyGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier of the hierarchy group.

" + }, + "HierarchyGroupArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the hierarchy group.

" + } + } + }, "CreateUserRequest":{ "type":"structure", "required":[ @@ -1305,6 +2218,88 @@ "max":9999, "min":0 }, + "DeleteInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "DeleteIntegrationAssociationRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "IntegrationAssociationId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "IntegrationAssociationId":{ + "shape":"IntegrationAssociationId", + "documentation":"

The identifier for the AppIntegration association.

", + "location":"uri", + "locationName":"IntegrationAssociationId" + } + } + }, + "DeleteUseCaseRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "IntegrationAssociationId", + "UseCaseId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "IntegrationAssociationId":{ + "shape":"IntegrationAssociationId", + "documentation":"

The identifier for the AppIntegration association.

", + "location":"uri", + "locationName":"IntegrationAssociationId" + }, + "UseCaseId":{ + "shape":"UseCaseId", + "documentation":"

The identifier for the use case.

", + "location":"uri", + "locationName":"UseCaseId" + } + } + }, + "DeleteUserHierarchyGroupRequest":{ + "type":"structure", + "required":[ + "HierarchyGroupId", + "InstanceId" + ], + "members":{ + "HierarchyGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier of the hierarchy group.

", + "location":"uri", + "locationName":"HierarchyGroupId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, "DeleteUserRequest":{ "type":"structure", "required":[ @@ -1356,6 +2351,94 @@ } } }, + "DescribeInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AttributeType" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AttributeType":{ + "shape":"InstanceAttributeType", + "documentation":"

The type of attribute.

", + "location":"uri", + "locationName":"AttributeType" + } + } + }, + "DescribeInstanceAttributeResponse":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"Attribute", + "documentation":"

The type of attribute.

" + } + } + }, + "DescribeInstanceRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, + "DescribeInstanceResponse":{ + "type":"structure", + "members":{ + "Instance":{ + "shape":"Instance", + "documentation":"

The Amazon Connect instance.

" + } + } + }, + "DescribeInstanceStorageConfigRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AssociationId", + "ResourceType" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

", + "location":"uri", + "locationName":"AssociationId" + }, + "ResourceType":{ + "shape":"InstanceStorageResourceType", + "documentation":"

A valid resource type.

", + "location":"querystring", + "locationName":"resourceType" + } + } + }, + "DescribeInstanceStorageConfigResponse":{ + "type":"structure", + "members":{ + "StorageConfig":{ + "shape":"InstanceStorageConfig", + "documentation":"

A valid storage type.
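For reference, a sketch of reading a storage configuration back with the generated Java v2 client; the instance and association IDs are placeholders, and any of the InstanceStorageResourceType values defined in this model could be passed as the resource type.

    import software.amazon.awssdk.services.connect.ConnectClient;
    import software.amazon.awssdk.services.connect.model.DescribeInstanceStorageConfigRequest;
    import software.amazon.awssdk.services.connect.model.InstanceStorageConfig;
    import software.amazon.awssdk.services.connect.model.InstanceStorageResourceType;

    public class StorageConfigSketch {
        public static void main(String[] args) {
            try (ConnectClient connect = ConnectClient.create()) {
                InstanceStorageConfig config = connect.describeInstanceStorageConfig(
                        DescribeInstanceStorageConfigRequest.builder()
                                .instanceId("11111111-2222-3333-4444-555555555555") // placeholder instance ID
                                .associationId("a1b2c3d4e5f6")                      // placeholder association ID
                                .resourceType(InstanceStorageResourceType.CALL_RECORDINGS)
                                .build())
                        .storageConfig();
                System.out.println("Storage type: " + config.storageType());
            }
        }
    }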

" + } + } + }, "DescribeRoutingProfileRequest":{ "type":"structure", "required":[ @@ -1467,6 +2550,11 @@ } } }, + "Description":{ + "type":"string", + "max":4096, + "min":0 + }, "DestinationNotAllowedException":{ "type":"structure", "members":{ @@ -1493,13 +2581,33 @@ }, "documentation":"

Contains information about the dimensions for a set of metrics.

" }, + "DirectoryAlias":{ + "type":"string", + "max":62, + "min":1, + "pattern":"^(?!d-)([\\da-zA-Z]+)([-]*[\\da-zA-Z])*$", + "sensitive":true + }, + "DirectoryId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^d-[0-9a-f]{10}$" + }, + "DirectoryType":{ + "type":"string", + "enum":[ + "SAML", + "CONNECT_MANAGED", + "EXISTING_DIRECTORY" + ] + }, "DirectoryUserId":{"type":"string"}, - "DisassociateRoutingProfileQueuesRequest":{ + "DisassociateApprovedOriginRequest":{ "type":"structure", "required":[ "InstanceId", - "RoutingProfileId", - "QueueReferences" + "Origin" ], "members":{ "InstanceId":{ @@ -1508,20 +2616,140 @@ "location":"uri", "locationName":"InstanceId" }, - "RoutingProfileId":{ - "shape":"RoutingProfileId", - "documentation":"

The identifier of the routing profile.

", - "location":"uri", - "locationName":"RoutingProfileId" - }, - "QueueReferences":{ - "shape":"RoutingProfileQueueReferenceList", - "documentation":"

The queues to disassociate from this routing profile.

" + "Origin":{ + "shape":"Origin", + "documentation":"

The domain URL of the integrated application.

", + "location":"querystring", + "locationName":"origin" } } }, - "DisplayName":{ - "type":"string", + "DisassociateInstanceStorageConfigRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AssociationId", + "ResourceType" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

", + "location":"uri", + "locationName":"AssociationId" + }, + "ResourceType":{ + "shape":"InstanceStorageResourceType", + "documentation":"

A valid resource type.

", + "location":"querystring", + "locationName":"resourceType" + } + } + }, + "DisassociateLambdaFunctionRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "FunctionArn" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "FunctionArn":{ + "shape":"FunctionArn", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function being disassociated.

", + "location":"querystring", + "locationName":"functionArn" + } + } + }, + "DisassociateLexBotRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "BotName", + "LexRegion" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "BotName":{ + "shape":"BotName", + "documentation":"

The name of the Amazon Lex bot. Maximum character limit of 50.

", + "location":"querystring", + "locationName":"botName" + }, + "LexRegion":{ + "shape":"LexRegion", + "documentation":"

The Region in which the Amazon Lex bot has been created.

", + "location":"querystring", + "locationName":"lexRegion" + } + } + }, + "DisassociateRoutingProfileQueuesRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "RoutingProfileId", + "QueueReferences" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "RoutingProfileId":{ + "shape":"RoutingProfileId", + "documentation":"

The identifier of the routing profile.

", + "location":"uri", + "locationName":"RoutingProfileId" + }, + "QueueReferences":{ + "shape":"RoutingProfileQueueReferenceList", + "documentation":"

The queues to disassociate from this routing profile.

" + } + } + }, + "DisassociateSecurityKeyRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AssociationId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

", + "location":"uri", + "locationName":"AssociationId" + } + } + }, + "DisplayName":{ + "type":"string", "max":256, "min":1 }, @@ -1535,6 +2763,28 @@ "exception":true }, "Email":{"type":"string"}, + "EncryptionConfig":{ + "type":"structure", + "required":[ + "EncryptionType", + "KeyId" + ], + "members":{ + "EncryptionType":{ + "shape":"EncryptionType", + "documentation":"

The type of encryption.

" + }, + "KeyId":{ + "shape":"KeyId", + "documentation":"

The identifier of the encryption key.

" + } + }, + "documentation":"

The encryption configuration.

" + }, + "EncryptionType":{ + "type":"string", + "enum":["KMS"] + }, "Filters":{ "type":"structure", "members":{ @@ -1549,6 +2799,15 @@ }, "documentation":"

Contains the filter to apply when retrieving metrics.

" }, + "FunctionArn":{ + "type":"string", + "max":140, + "min":1 + }, + "FunctionArnsList":{ + "type":"list", + "member":{"shape":"FunctionArn"} + }, "GetContactAttributesRequest":{ "type":"structure", "required":[ @@ -1595,11 +2854,11 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

The queues, up to 100, or channels, to use to filter the metrics returned. Metric data is retrieved only for the resources associated with the queues or channels included in the filter. You can include both queue IDs and queue ARNs in the same request. Both VOICE and CHAT channels are supported.

" + "documentation":"

The queues, up to 100, or channels, to use to filter the metrics returned. Metric data is retrieved only for the resources associated with the queues or channels included in the filter. You can include both queue IDs and queue ARNs in the same request. VOICE, CHAT, and TASK channels are supported.

" }, "Groupings":{ "shape":"Groupings", - "documentation":"

The grouping applied to the metrics returned. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues. If you group by CHANNEL, you should include a Channels filter. Both VOICE and CHAT channels are supported.

If no Grouping is included in the request, a summary of metrics is returned.

" + "documentation":"

The grouping applied to the metrics returned. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues. If you group by CHANNEL, you should include a Channels filter. VOICE, CHAT, and TASK channels are supported.

If no Grouping is included in the request, a summary of metrics is returned.

" }, "CurrentMetrics":{ "shape":"CurrentMetrics", @@ -1680,7 +2939,7 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

The queues, up to 100, or channels, to use to filter the metrics returned. Metric data is retrieved only for the resources associated with the queues or channels included in the filter. You can include both queue IDs and queue ARNs in the same request. Both VOICE and CHAT channels are supported.

" + "documentation":"

The queues, up to 100, or channels, to use to filter the metrics returned. Metric data is retrieved only for the resources associated with the queues or channels included in the filter. You can include both queue IDs and queue ARNs in the same request. VOICE, CHAT, and TASK channels are supported.

" }, "Groupings":{ "shape":"Groupings", @@ -1796,6 +3055,17 @@ }, "HierarchyLevelId":{"type":"string"}, "HierarchyLevelName":{"type":"string"}, + "HierarchyLevelUpdate":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"HierarchyLevelName", + "documentation":"

The name of the user hierarchy level. Must not be more than 50 characters.

" + } + }, + "documentation":"

Contains information about the hierarchy level to update.

" + }, "HierarchyPath":{ "type":"structure", "members":{ @@ -1848,6 +3118,32 @@ }, "documentation":"

Contains information about a hierarchy structure.

" }, + "HierarchyStructureUpdate":{ + "type":"structure", + "members":{ + "LevelOne":{ + "shape":"HierarchyLevelUpdate", + "documentation":"

The update for level one.

" + }, + "LevelTwo":{ + "shape":"HierarchyLevelUpdate", + "documentation":"

The update for level two.

" + }, + "LevelThree":{ + "shape":"HierarchyLevelUpdate", + "documentation":"

The update for level three.

" + }, + "LevelFour":{ + "shape":"HierarchyLevelUpdate", + "documentation":"

The update for level four.

" + }, + "LevelFive":{ + "shape":"HierarchyLevelUpdate", + "documentation":"

The update for level five.

" + } + }, + "documentation":"

Contains information about the level hierarchy to update.

" + }, "HistoricalMetric":{ "type":"structure", "members":{ @@ -1921,118 +3217,692 @@ "SERVICE_LEVEL" ] }, - "HistoricalMetricResult":{ + "HistoricalMetricResult":{ + "type":"structure", + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

The dimension for the metrics.

" + }, + "Collections":{ + "shape":"HistoricalMetricDataCollections", + "documentation":"

The set of metrics.

" + } + }, + "documentation":"

Contains information about the historical metrics retrieved.

" + }, + "HistoricalMetricResults":{ + "type":"list", + "member":{"shape":"HistoricalMetricResult"} + }, + "HistoricalMetrics":{ + "type":"list", + "member":{"shape":"HistoricalMetric"} + }, + "Hours":{ + "type":"integer", + "max":87600, + "min":0 + }, + "HoursOfOperationId":{"type":"string"}, + "HoursOfOperationName":{"type":"string"}, + "HoursOfOperationSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier of the hours of operation.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the hours of operation.

" + }, + "Name":{ + "shape":"HoursOfOperationName", + "documentation":"

The name of the hours of operation.

" + } + }, + "documentation":"

Contains summary information about hours of operation for a contact center.

" + }, + "HoursOfOperationSummaryList":{ + "type":"list", + "member":{"shape":"HoursOfOperationSummary"} + }, + "InboundCallsEnabled":{"type":"boolean"}, + "Instance":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the instance.

" + }, + "IdentityManagementType":{ + "shape":"DirectoryType", + "documentation":"

The identity management type.

" + }, + "InstanceAlias":{ + "shape":"DirectoryAlias", + "documentation":"

The alias of the instance.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

When the instance was created.

" + }, + "ServiceRole":{ + "shape":"ARN", + "documentation":"

The service role of the instance.

" + }, + "InstanceStatus":{ + "shape":"InstanceStatus", + "documentation":"

The state of the instance.

" + }, + "StatusReason":{ + "shape":"InstanceStatusReason", + "documentation":"

Relevant details about why the instance was not created successfully.

" + }, + "InboundCallsEnabled":{ + "shape":"InboundCallsEnabled", + "documentation":"

Whether inbound calls are enabled.

" + }, + "OutboundCallsEnabled":{ + "shape":"OutboundCallsEnabled", + "documentation":"

Whether outbound calls are enabled.

" + } + }, + "documentation":"

The Amazon Connect instance.

" + }, + "InstanceAttributeType":{ + "type":"string", + "enum":[ + "INBOUND_CALLS", + "OUTBOUND_CALLS", + "CONTACTFLOW_LOGS", + "CONTACT_LENS", + "AUTO_RESOLVE_BEST_VOICES", + "USE_CUSTOM_TTS_VOICES", + "EARLY_MEDIA" + ] + }, + "InstanceAttributeValue":{ + "type":"string", + "max":100, + "min":1 + }, + "InstanceId":{ + "type":"string", + "max":100, + "min":1 + }, + "InstanceStatus":{ + "type":"string", + "enum":[ + "CREATION_IN_PROGRESS", + "ACTIVE", + "CREATION_FAILED" + ] + }, + "InstanceStatusReason":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

The message.

" + } + }, + "documentation":"

Relevant details about why the instance was not created successfully.

" + }, + "InstanceStorageConfig":{ + "type":"structure", + "required":["StorageType"], + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

" + }, + "StorageType":{ + "shape":"StorageType", + "documentation":"

A valid storage type.

" + }, + "S3Config":{ + "shape":"S3Config", + "documentation":"

The S3 configuration.

" + }, + "KinesisVideoStreamConfig":{ + "shape":"KinesisVideoStreamConfig", + "documentation":"

The configuration of the Kinesis video stream.

" + }, + "KinesisStreamConfig":{ + "shape":"KinesisStreamConfig", + "documentation":"

The configuration of the Kinesis data stream.

" + }, + "KinesisFirehoseConfig":{ + "shape":"KinesisFirehoseConfig", + "documentation":"

The configuration of the Kinesis Firehose delivery stream.

" + } + }, + "documentation":"

The storage configuration for the instance.

" + }, + "InstanceStorageConfigs":{ + "type":"list", + "member":{"shape":"InstanceStorageConfig"} + }, + "InstanceStorageResourceType":{ + "type":"string", + "enum":[ + "CHAT_TRANSCRIPTS", + "CALL_RECORDINGS", + "SCHEDULED_REPORTS", + "MEDIA_STREAMS", + "CONTACT_TRACE_RECORDS", + "AGENT_EVENTS" + ] + }, + "InstanceSummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"InstanceId", + "documentation":"

The identifier of the instance.

" + }, + "Arn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the instance.

" + }, + "IdentityManagementType":{ + "shape":"DirectoryType", + "documentation":"

The identity management type of the instance.

" + }, + "InstanceAlias":{ + "shape":"DirectoryAlias", + "documentation":"

The alias of the instance.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

When the instance was created.

" + }, + "ServiceRole":{ + "shape":"ARN", + "documentation":"

The service role of the instance.

" + }, + "InstanceStatus":{ + "shape":"InstanceStatus", + "documentation":"

The state of the instance.

" + }, + "InboundCallsEnabled":{ + "shape":"InboundCallsEnabled", + "documentation":"

Whether inbound calls are enabled.

" + }, + "OutboundCallsEnabled":{ + "shape":"OutboundCallsEnabled", + "documentation":"

Whether outbound calls are enabled.

" + } + }, + "documentation":"

Information about the instance.

" + }, + "InstanceSummaryList":{ + "type":"list", + "member":{"shape":"InstanceSummary"} + }, + "IntegrationAssociationId":{ + "type":"string", + "max":200, + "min":1 + }, + "IntegrationAssociationSummary":{ + "type":"structure", + "members":{ + "IntegrationAssociationId":{ + "shape":"IntegrationAssociationId", + "documentation":"

The identifier for the AppIntegration association.

" + }, + "IntegrationAssociationArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the AppIntegration association.

" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

" + }, + "IntegrationType":{ + "shape":"IntegrationType", + "documentation":"

The integration type.

" + }, + "IntegrationArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the AppIntegration.

" + }, + "SourceApplicationUrl":{ + "shape":"URI", + "documentation":"

The URL for the external application.

" + }, + "SourceApplicationName":{ + "shape":"SourceApplicationName", + "documentation":"

The user-provided, friendly name for the external application.

" + }, + "SourceType":{ + "shape":"SourceType", + "documentation":"

The name of the source.

" + } + }, + "documentation":"

Contains summary information about the associated AppIntegrations.

" + }, + "IntegrationAssociationSummaryList":{ + "type":"list", + "member":{"shape":"IntegrationAssociationSummary"} + }, + "IntegrationType":{ + "type":"string", + "enum":["EVENT"] + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"Message", + "documentation":"

The message.

" + } + }, + "documentation":"

Request processing failed due to an error or failure with the service.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvalidContactFlowException":{ + "type":"structure", + "members":{ + "problems":{ + "shape":"Problems", + "documentation":"

The problems with the contact flow. Please fix before trying again.

" + } + }, + "documentation":"

The contact flow is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"Message", + "documentation":"

The message.

" + } + }, + "documentation":"

One or more of the specified parameters are not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"Message", + "documentation":"

The message.

" + } + }, + "documentation":"

The request is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "KeyId":{ + "type":"string", + "max":128, + "min":1 + }, + "KinesisFirehoseConfig":{ + "type":"structure", + "required":["FirehoseArn"], + "members":{ + "FirehoseArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the delivery stream.

" + } + }, + "documentation":"

Configuration information of a Kinesis Firehose delivery stream.

" + }, + "KinesisStreamConfig":{ + "type":"structure", + "required":["StreamArn"], + "members":{ + "StreamArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the data stream.

" + } + }, + "documentation":"

Configuration information of a Kinesis data stream.

" + }, + "KinesisVideoStreamConfig":{ + "type":"structure", + "required":[ + "Prefix", + "RetentionPeriodHours", + "EncryptionConfig" + ], + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

The prefix of the video stream.

" + }, + "RetentionPeriodHours":{ + "shape":"Hours", + "documentation":"

The number of hours data is retained in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream.

The default value is 0, indicating that the stream does not persist data.

" + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

The encryption configuration.

" + } + }, + "documentation":"

Configuration information of a Kinesis video stream.

" + }, + "LexBot":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"BotName", + "documentation":"

The name of the Amazon Lex bot.

" + }, + "LexRegion":{ + "shape":"LexRegion", + "documentation":"

The Region the Amazon Lex bot was created in.

" + } + }, + "documentation":"

Configuration information of an Amazon Lex bot.

" + }, + "LexBotsList":{ + "type":"list", + "member":{"shape":"LexBot"} + }, + "LexRegion":{ + "type":"string", + "max":60 + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"Message", + "documentation":"

The message.

" + } + }, + "documentation":"

The allowed limit for the resource has been exceeded.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "ListApprovedOriginsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult25", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListApprovedOriginsResponse":{ + "type":"structure", + "members":{ + "Origins":{ + "shape":"OriginsList", + "documentation":"

The approved origins.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListContactFlowsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "ContactFlowTypes":{ + "shape":"ContactFlowTypes", + "documentation":"

The type of contact flow.

", + "location":"querystring", + "locationName":"contactFlowTypes" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListContactFlowsResponse":{ + "type":"structure", + "members":{ + "ContactFlowSummaryList":{ + "shape":"ContactFlowSummaryList", + "documentation":"

Information about the contact flows.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListHoursOfOperationsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult1000", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListHoursOfOperationsResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationSummaryList":{ + "shape":"HoursOfOperationSummaryList", + "documentation":"

Information about the hours of operation.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListInstanceAttributesRequest":{ "type":"structure", + "required":["InstanceId"], "members":{ - "Dimensions":{ - "shape":"Dimensions", - "documentation":"

The dimension for the metrics.

" + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" }, - "Collections":{ - "shape":"HistoricalMetricDataCollections", - "documentation":"

The set of metrics.

" + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult7", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" } - }, - "documentation":"

Contains information about the historical metrics retrieved.

" - }, - "HistoricalMetricResults":{ - "type":"list", - "member":{"shape":"HistoricalMetricResult"} + } }, - "HistoricalMetrics":{ - "type":"list", - "member":{"shape":"HistoricalMetric"} + "ListInstanceAttributesResponse":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"AttributesList", + "documentation":"

The attribute types.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } }, - "HoursOfOperationId":{"type":"string"}, - "HoursOfOperationName":{"type":"string"}, - "HoursOfOperationSummary":{ + "ListInstanceStorageConfigsRequest":{ "type":"structure", + "required":[ + "InstanceId", + "ResourceType" + ], "members":{ - "Id":{ - "shape":"HoursOfOperationId", - "documentation":"

The identifier of the hours of operation.

" + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" }, - "Arn":{ - "shape":"ARN", - "documentation":"

The Amazon Resource Name (ARN) of the hours of operation.

" + "ResourceType":{ + "shape":"InstanceStorageResourceType", + "documentation":"

A valid resource type.

", + "location":"querystring", + "locationName":"resourceType" }, - "Name":{ - "shape":"HoursOfOperationName", - "documentation":"

The name of the hours of operation.

" + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult10", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" } - }, - "documentation":"

Contains summary information about hours of operation for a contact center.

" - }, - "HoursOfOperationSummaryList":{ - "type":"list", - "member":{"shape":"HoursOfOperationSummary"} - }, - "InstanceId":{ - "type":"string", - "max":100, - "min":1 + } }, - "InternalServiceException":{ + "ListInstanceStorageConfigsResponse":{ "type":"structure", "members":{ - "Message":{ - "shape":"Message", - "documentation":"

The message.

" + "StorageConfigs":{ + "shape":"InstanceStorageConfigs", + "documentation":"

The storage configurations for the specified instance and resource type.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" } - }, - "documentation":"

Request processing failed due to an error or failure with the service.

", - "error":{"httpStatusCode":500}, - "exception":true + } }, - "InvalidContactFlowException":{ + "ListInstancesRequest":{ "type":"structure", "members":{ - "problems":{ - "shape":"Problems", - "documentation":"

The problems with the contact flow. Please fix before trying again.

" + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult10", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" } - }, - "documentation":"

The contact flow is not valid.

", - "error":{"httpStatusCode":400}, - "exception":true + } }, - "InvalidParameterException":{ + "ListInstancesResponse":{ "type":"structure", "members":{ - "Message":{ - "shape":"Message", - "documentation":"

The message.

" + "InstanceSummaryList":{ + "shape":"InstanceSummaryList", + "documentation":"

Information about the instances.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" } - }, - "documentation":"

One or more of the specified parameters are not valid.

", - "error":{"httpStatusCode":400}, - "exception":true + } }, - "InvalidRequestException":{ + "ListIntegrationAssociationsRequest":{ "type":"structure", + "required":["InstanceId"], "members":{ - "Message":{ - "shape":"Message", - "documentation":"

The message.

" + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" } - }, - "documentation":"

The request is not valid.

", - "error":{"httpStatusCode":400}, - "exception":true + } }, - "LimitExceededException":{ + "ListIntegrationAssociationsResponse":{ "type":"structure", "members":{ - "Message":{ - "shape":"Message", - "documentation":"

The message.

" + "IntegrationAssociationSummaryList":{ + "shape":"IntegrationAssociationSummaryList", + "documentation":"

The AppIntegration associations.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" } - }, - "documentation":"

The allowed limit for the resource has been exceeded.

", - "error":{"httpStatusCode":429}, - "exception":true + } }, - "ListContactFlowsRequest":{ + "ListLambdaFunctionsRequest":{ "type":"structure", "required":["InstanceId"], "members":{ @@ -2042,12 +3912,6 @@ "location":"uri", "locationName":"InstanceId" }, - "ContactFlowTypes":{ - "shape":"ContactFlowTypes", - "documentation":"

The type of contact flow.

", - "location":"querystring", - "locationName":"contactFlowTypes" - }, "NextToken":{ "shape":"NextToken", "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", @@ -2055,19 +3919,20 @@ "locationName":"nextToken" }, "MaxResults":{ - "shape":"MaxResult1000", + "shape":"MaxResult25", "documentation":"

The maximum number of results to return per page.

", + "box":true, "location":"querystring", "locationName":"maxResults" } } }, - "ListContactFlowsResponse":{ + "ListLambdaFunctionsResponse":{ "type":"structure", "members":{ - "ContactFlowSummaryList":{ - "shape":"ContactFlowSummaryList", - "documentation":"

Information about the contact flows.

" + "LambdaFunctions":{ + "shape":"FunctionArnsList", + "documentation":"

The Lambda function ARNs associated with the specified instance.

" }, "NextToken":{ "shape":"NextToken", @@ -2075,7 +3940,7 @@ } } }, - "ListHoursOfOperationsRequest":{ + "ListLexBotsRequest":{ "type":"structure", "required":["InstanceId"], "members":{ @@ -2092,19 +3957,20 @@ "locationName":"nextToken" }, "MaxResults":{ - "shape":"MaxResult1000", + "shape":"MaxResult25", "documentation":"

The maximum number of results to return per page.

", + "box":true, "location":"querystring", "locationName":"maxResults" } } }, - "ListHoursOfOperationsResponse":{ + "ListLexBotsResponse":{ "type":"structure", "members":{ - "HoursOfOperationSummaryList":{ - "shape":"HoursOfOperationSummaryList", - "documentation":"

Information about the hours of operation.

" + "LexBots":{ + "shape":"LexBotsList", + "documentation":"

The names and Regions of the Amazon Lex bots associated with the specified instance.

" }, "NextToken":{ "shape":"NextToken", @@ -2327,6 +4193,44 @@ } } }, + "ListSecurityKeysRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult2", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSecurityKeysResponse":{ + "type":"structure", + "members":{ + "SecurityKeys":{ + "shape":"SecurityKeysList", + "documentation":"

The security keys.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, "ListSecurityProfilesRequest":{ "type":"structure", "required":["InstanceId"], @@ -2375,14 +4279,62 @@ "location":"uri", "locationName":"resourceArn" } - } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

Information about the tags.

" + } + } + }, + "ListUseCasesRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "IntegrationAssociationId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "IntegrationAssociationId":{ + "shape":"IntegrationAssociationId", + "documentation":"

The identifier for the integration association.

", + "location":"uri", + "locationName":"IntegrationAssociationId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + }, + "documentation":"

Provides summary information about the use cases for the specified Amazon Connect AppIntegration association.

" }, - "ListTagsForResourceResponse":{ + "ListUseCasesResponse":{ "type":"structure", "members":{ - "tags":{ - "shape":"TagMap", - "documentation":"

Information about the tags.

" + "UseCaseSummaryList":{ + "shape":"UseCaseSummaryList", + "documentation":"

The use cases.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" } } }, @@ -2462,6 +4414,11 @@ } } }, + "MaxResult10":{ + "type":"integer", + "max":10, + "min":1 + }, "MaxResult100":{ "type":"integer", "max":100, @@ -2472,6 +4429,21 @@ "max":1000, "min":1 }, + "MaxResult2":{ + "type":"integer", + "max":2, + "min":1 + }, + "MaxResult25":{ + "type":"integer", + "max":25, + "min":1 + }, + "MaxResult7":{ + "type":"integer", + "max":7, + "min":1 + }, "MediaConcurrencies":{ "type":"list", "member":{"shape":"MediaConcurrency"} @@ -2495,7 +4467,21 @@ "documentation":"

Contains information about which channels are supported, and how many contacts an agent can have on a channel simultaneously.

" }, "Message":{"type":"string"}, + "Name":{ + "type":"string", + "max":512, + "min":0 + }, "NextToken":{"type":"string"}, + "Origin":{ + "type":"string", + "max":267 + }, + "OriginsList":{ + "type":"list", + "member":{"shape":"Origin"} + }, + "OutboundCallsEnabled":{"type":"boolean"}, "OutboundContactNotPermittedException":{ "type":"structure", "members":{ @@ -2508,6 +4494,11 @@ "error":{"httpStatusCode":403}, "exception":true }, + "PEM":{ + "type":"string", + "max":1024, + "min":1 + }, "ParticipantDetails":{ "type":"structure", "required":["DisplayName"], @@ -2831,6 +4822,11 @@ "DESK_PHONE" ] }, + "Prefix":{ + "type":"string", + "max":128, + "min":1 + }, "Priority":{ "type":"integer", "max":99, @@ -2953,6 +4949,64 @@ "max":100, "min":1 }, + "Reference":{ + "type":"structure", + "required":[ + "Value", + "Type" + ], + "members":{ + "Value":{ + "shape":"ReferenceValue", + "documentation":"

A formatted URL that will be shown to an agent in the Contact Control Panel (CCP).

" + }, + "Type":{ + "shape":"ReferenceType", + "documentation":"

The type of the reference. URL is currently the only supported type.

" + } + }, + "documentation":"

A link that an agent selects to complete a given task. You can have up to 4,096 UTF-8 bytes across all references for a contact.

" + }, + "ReferenceKey":{ + "type":"string", + "max":4096, + "min":1 + }, + "ReferenceType":{ + "type":"string", + "enum":["URL"] + }, + "ReferenceValue":{ + "type":"string", + "max":4096, + "min":0 + }, + "ResourceConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

A resource already has that name.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource.

" + }, + "ResourceId":{ + "shape":"ARN", + "documentation":"

The identifier for the resource.

" + } + }, + "documentation":"

That resource is already in use. Please try another.

", + "error":{"httpStatusCode":409}, + "exception":true + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -2965,6 +5019,18 @@ "error":{"httpStatusCode":404}, "exception":true }, + "ResourceType":{ + "type":"string", + "enum":[ + "CONTACT", + "CONTACT_FLOW", + "INSTANCE", + "PARTICIPANT", + "HIERARCHY_LEVEL", + "HIERARCHY_GROUP", + "USER" + ] + }, "ResumeContactRecordingRequest":{ "type":"structure", "required":[ @@ -3158,6 +5224,50 @@ "type":"list", "member":{"shape":"RoutingProfileSummary"} }, + "S3Config":{ + "type":"structure", + "required":[ + "BucketName", + "BucketPrefix" + ], + "members":{ + "BucketName":{ + "shape":"BucketName", + "documentation":"

The S3 bucket name.

" + }, + "BucketPrefix":{ + "shape":"Prefix", + "documentation":"

The S3 bucket prefix.

" + }, + "EncryptionConfig":{ + "shape":"EncryptionConfig", + "documentation":"

The S3 encryption configuration.

" + } + }, + "documentation":"

Information about the S3 storage type.

" + }, + "SecurityKey":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

" + }, + "Key":{ + "shape":"PEM", + "documentation":"

The key of the security key.

" + }, + "CreationTime":{ + "shape":"timestamp", + "documentation":"

When the security key was created.

" + } + }, + "documentation":"

Configuration information of the security key.

" + }, + "SecurityKeysList":{ + "type":"list", + "member":{"shape":"SecurityKey"} + }, "SecurityProfileId":{"type":"string"}, "SecurityProfileIds":{ "type":"list", @@ -3192,6 +5302,28 @@ "type":"string", "sensitive":true }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The service quota has been exceeded.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SourceApplicationName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9_ -]+$" + }, + "SourceType":{ + "type":"string", + "enum":[ + "SALESFORCE", + "ZENDESK" + ] + }, "StartChatContactRequest":{ "type":"structure", "required":[ @@ -3324,6 +5456,58 @@ } } }, + "StartTaskContactRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ContactFlowId", + "Name" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

" + }, + "PreviousContactId":{ + "shape":"ContactId", + "documentation":"

The identifier of the previous chat, voice, or task contact.

" + }, + "ContactFlowId":{ + "shape":"ContactFlowId", + "documentation":"

The identifier of the contact flow for initiating the tasks. To see the ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to Routing, Contact Flows. Choose the contact flow. On the contact flow page, under the name of the contact flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold:

arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx

" + }, + "Attributes":{ + "shape":"Attributes", + "documentation":"

A custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in contact flows just like any other contact attributes.

There can be up to 32,768 UTF-8 bytes across all key-value pairs per contact. Attribute keys can include only alphanumeric, dash, and underscore characters.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name of a task that is shown to an agent in the Contact Control Panel (CCP).

" + }, + "References":{ + "shape":"ContactReferences", + "documentation":"

A formatted URL that is shown to an agent in the Contact Control Panel (CCP).

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the task that is shown to an agent in the Contact Control Panel (CCP).

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
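
The sketch below assembles this request with the AWS SDK for Java v2: a contact flow ID taken from the console as described above, contact attributes, a URL reference for the CCP, and a client token for idempotency. Class and setter names follow the standard codegen for this model; all IDs and URLs are placeholders.

    import java.util.Collections;
    import java.util.UUID;
    import software.amazon.awssdk.services.connect.ConnectClient;
    import software.amazon.awssdk.services.connect.model.Reference;
    import software.amazon.awssdk.services.connect.model.ReferenceType;
    import software.amazon.awssdk.services.connect.model.StartTaskContactRequest;
    import software.amazon.awssdk.services.connect.model.StartTaskContactResponse;

    public class StartTaskContactSketch {
        public static void main(String[] args) {
            try (ConnectClient connect = ConnectClient.create()) {
                StartTaskContactResponse response = connect.startTaskContact(
                        StartTaskContactRequest.builder()
                                .instanceId("11111111-2222-3333-4444-555555555555")    // placeholder
                                .contactFlowId("846ec553-a005-41c0-8341-000000000000") // last part of the flow ARN
                                .name("Follow up on order 1234")                       // shown to the agent in the CCP
                                .description("Customer requested a callback about order 1234")
                                .attributes(Collections.singletonMap("orderId", "1234"))
                                .references(Collections.singletonMap("Order details", Reference.builder()
                                        .type(ReferenceType.URL)
                                        .value("https://crm.example.com/orders/1234")  // placeholder URL
                                        .build()))
                                .clientToken(UUID.randomUUID().toString())             // idempotency token
                                .build());
                System.out.println("Created task contact " + response.contactId());
            }
        }
    }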

", + "idempotencyToken":true + } + } + }, + "StartTaskContactResponse":{ + "type":"structure", + "members":{ + "ContactId":{ + "shape":"ContactId", + "documentation":"

The identifier of this contact within the Amazon Connect instance.

" + } + } + }, "Statistic":{ "type":"string", "enum":[ @@ -3381,6 +5565,16 @@ "members":{ } }, + "StorageType":{ + "type":"string", + "enum":[ + "S3", + "KINESIS_VIDEO_STREAM", + "KINESIS_STREAM", + "KINESIS_FIREHOSE" + ] + }, + "String":{"type":"string"}, "SuspendContactRecordingRequest":{ "type":"structure", "required":[ @@ -3475,6 +5669,12 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Timestamp":{"type":"timestamp"}, + "URI":{ + "type":"string", + "max":2000, + "min":1 + }, "Unit":{ "type":"string", "enum":[ @@ -3586,6 +5786,62 @@ } } }, + "UpdateInstanceAttributeRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AttributeType", + "Value" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AttributeType":{ + "shape":"InstanceAttributeType", + "documentation":"

The type of attribute.

", + "location":"uri", + "locationName":"AttributeType" + }, + "Value":{ + "shape":"InstanceAttributeValue", + "documentation":"

The value for the attribute. Maximum character limit is 100.

" + } + } + }, + "UpdateInstanceStorageConfigRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "AssociationId", + "ResourceType", + "StorageConfig" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "AssociationId":{ + "shape":"AssociationId", + "documentation":"

The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID.

", + "location":"uri", + "locationName":"AssociationId" + }, + "ResourceType":{ + "shape":"InstanceStorageResourceType", + "documentation":"

A valid resource type.

", + "location":"querystring", + "locationName":"resourceType" + }, + "StorageConfig":{"shape":"InstanceStorageConfig"} + } + }, "UpdateRoutingProfileConcurrencyRequest":{ "type":"structure", "required":[ @@ -3689,7 +5945,33 @@ }, "QueueConfigs":{ "shape":"RoutingProfileQueueConfigList", - "documentation":"

The queues to be updated for this routing profile.

" + "documentation":"

The queues to be updated for this routing profile. Queues must first be associated with the routing profile. You can do this using AssociateRoutingProfileQueues.

" + } + } + }, + "UpdateUserHierarchyGroupNameRequest":{ + "type":"structure", + "required":[ + "Name", + "HierarchyGroupId", + "InstanceId" + ], + "members":{ + "Name":{ + "shape":"HierarchyGroupName", + "documentation":"

The name of the hierarchy group. Must not be more than 100 characters.

" + }, + "HierarchyGroupId":{ + "shape":"HierarchyGroupId", + "documentation":"

The identifier of the hierarchy group.

", + "location":"uri", + "locationName":"HierarchyGroupId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" } } }, @@ -3718,6 +6000,25 @@ } } }, + "UpdateUserHierarchyStructureRequest":{ + "type":"structure", + "required":[ + "HierarchyStructure", + "InstanceId" + ], + "members":{ + "HierarchyStructure":{ + "shape":"HierarchyStructureUpdate", + "documentation":"

The hierarchy levels to update.

" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + } + } + }, "UpdateUserIdentityInfoRequest":{ "type":"structure", "required":[ @@ -3822,6 +6123,37 @@ } } }, + "UseCase":{ + "type":"structure", + "members":{ + "UseCaseId":{ + "shape":"UseCaseId", + "documentation":"

The identifier for the use case.

" + }, + "UseCaseArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the use case.

" + }, + "UseCaseType":{ + "shape":"UseCaseType", + "documentation":"

The type of use case to associate with the AppIntegration association. Each AppIntegration association can have only one of each use case type.

" + } + }, + "documentation":"

Contains the use case.

" + }, + "UseCaseId":{ + "type":"string", + "max":200, + "min":1 + }, + "UseCaseSummaryList":{ + "type":"list", + "member":{"shape":"UseCase"} + }, + "UseCaseType":{ + "type":"string", + "enum":["RULES_EVALUATION"] + }, "User":{ "type":"structure", "members":{ diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml new file mode 100644 index 000000000000..4f3c0189c99f --- /dev/null +++ b/services/connectcontactlens/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + connectcontactlens + AWS Java SDK :: Services :: Connect Contact Lens + The AWS Java SDK for Connect Contact Lens module holds the client classes that are used for + communicating with Connect Contact Lens. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.connectcontactlens + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/connectcontactlens/src/main/resources/codegen-resources/paginators-1.json b/services/connectcontactlens/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d1d69dccd15d --- /dev/null +++ b/services/connectcontactlens/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "ListRealtimeContactAnalysisSegments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/services/connectcontactlens/src/main/resources/codegen-resources/service-2.json b/services/connectcontactlens/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..08a0c4e78b5b --- /dev/null +++ b/services/connectcontactlens/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,361 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-08-21", + "endpointPrefix":"contact-lens", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon Connect Contact Lens", + "serviceFullName":"Amazon Connect Contact Lens", + "serviceId":"Connect Contact Lens", + "signatureVersion":"v4", + "signingName":"connect", + "uid":"connect-contact-lens-2020-08-21" + }, + "operations":{ + "ListRealtimeContactAnalysisSegments":{ + "name":"ListRealtimeContactAnalysisSegments", + "http":{ + "method":"POST", + "requestUri":"/realtime-contact-analysis/analysis-segments" + }, + "input":{"shape":"ListRealtimeContactAnalysisSegmentsRequest"}, + "output":{"shape":"ListRealtimeContactAnalysisSegmentsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Provides a list of analysis segments for a real-time analysis session.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "Categories":{ + "type":"structure", + "required":[ + "MatchedCategories", + "MatchedDetails" + ], + "members":{ + "MatchedCategories":{ + "shape":"MatchedCategories", + "documentation":"

The category rules that have been matched in the analyzed segment.

" + }, + "MatchedDetails":{ + "shape":"MatchedDetails", + "documentation":"

The category rule that was matched and when it occurred in the transcript.

" + } + }, + "documentation":"

Provides the category rules that are used to automatically categorize contacts based on uttered keywords and phrases.

" + }, + "CategoryDetails":{ + "type":"structure", + "required":["PointsOfInterest"], + "members":{ + "PointsOfInterest":{ + "shape":"PointsOfInterest", + "documentation":"

The section of audio where the category rule was detected.

" + } + }, + "documentation":"

Provides information about the category rule that was matched.

" + }, + "CategoryName":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "CharacterOffset":{ + "type":"integer", + "min":0 + }, + "CharacterOffsets":{ + "type":"structure", + "required":[ + "BeginOffsetChar", + "EndOffsetChar" + ], + "members":{ + "BeginOffsetChar":{ + "shape":"CharacterOffset", + "documentation":"

The beginning of the issue.

" + }, + "EndOffsetChar":{ + "shape":"CharacterOffset", + "documentation":"

The end of the issue.

" + } + }, + "documentation":"

For characters that were detected as issues, where they occur in the transcript.

" + }, + "ContactId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "InstanceId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

Request processing failed due to an error or failure with the service.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The request is not valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "IssueDetected":{ + "type":"structure", + "required":["CharacterOffsets"], + "members":{ + "CharacterOffsets":{ + "shape":"CharacterOffsets", + "documentation":"

The offset for when the issue was detected in the segment.

" + } + }, + "documentation":"

Potential issues that are detected based on an artificial intelligence analysis of each turn in the conversation.

" + }, + "IssuesDetected":{ + "type":"list", + "member":{"shape":"IssueDetected"}, + "max":20, + "min":0 + }, + "ListRealtimeContactAnalysisSegmentsRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "ContactId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

" + }, + "ContactId":{ + "shape":"ContactId", + "documentation":"

The identifier of the contact.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + } + } + }, + "ListRealtimeContactAnalysisSegmentsResponse":{ + "type":"structure", + "required":["Segments"], + "members":{ + "Segments":{ + "shape":"RealtimeContactAnalysisSegments", + "documentation":"

An analyzed transcript or category.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response includes nextToken, there are two possible scenarios:

  • There are more segments, so another call is required to get them.

  • There are no more segments at this time, but more may be available later (real-time analysis is in progress), so the client should call the operation again to get new segments.

If the response does not include nextToken, the analysis is complete (whether it succeeded or failed) and there are no more segments to retrieve.
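
A minimal polling sketch of these semantics with the AWS SDK for Java v2, assuming the client and model classes generated for this new service (ConnectContactLensClient and friends); the instance ID, contact ID, and poll interval are placeholders.

    import software.amazon.awssdk.services.connectcontactlens.ConnectContactLensClient;
    import software.amazon.awssdk.services.connectcontactlens.model.ListRealtimeContactAnalysisSegmentsRequest;
    import software.amazon.awssdk.services.connectcontactlens.model.ListRealtimeContactAnalysisSegmentsResponse;

    public class RealtimeAnalysisPollerSketch {
        public static void main(String[] args) throws InterruptedException {
            try (ConnectContactLensClient contactLens = ConnectContactLensClient.create()) {
                String nextToken = null;
                do {
                    ListRealtimeContactAnalysisSegmentsResponse response =
                            contactLens.listRealtimeContactAnalysisSegments(
                                    ListRealtimeContactAnalysisSegmentsRequest.builder()
                                            .instanceId("11111111-2222-3333-4444-555555555555") // placeholder
                                            .contactId("66666666-7777-8888-9999-000000000000")  // placeholder
                                            .nextToken(nextToken)
                                            .build());
                    response.segments().stream()
                            .map(segment -> segment.transcript())
                            .filter(transcript -> transcript != null) // category-only segments have no transcript
                            .forEach(transcript ->
                                    System.out.println(transcript.sentiment() + ": " + transcript.content()));
                    nextToken = response.nextToken();
                    if (nextToken != null) {
                        Thread.sleep(5_000); // analysis may still be in progress; poll again for new segments
                    }
                } while (nextToken != null);  // no nextToken: the analysis is complete
            }
        }
    }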

" + } + } + }, + "MatchedCategories":{ + "type":"list", + "member":{"shape":"CategoryName"}, + "max":150, + "min":0 + }, + "MatchedDetails":{ + "type":"map", + "key":{"shape":"CategoryName"}, + "value":{"shape":"CategoryDetails"}, + "max":150, + "min":0 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "Message":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":131070, + "min":1, + "pattern":".*\\S.*" + }, + "OffsetMillis":{ + "type":"integer", + "min":0 + }, + "ParticipantId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "ParticipantRole":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + }, + "PointOfInterest":{ + "type":"structure", + "required":[ + "BeginOffsetMillis", + "EndOffsetMillis" + ], + "members":{ + "BeginOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

The beginning offset in milliseconds where the category rule was detected.

" + }, + "EndOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

The ending offset in milliseconds where the category rule was detected.

" + } + }, + "documentation":"

The section of the contact audio where that category rule was detected.

" + }, + "PointsOfInterest":{ + "type":"list", + "member":{"shape":"PointOfInterest"}, + "max":20, + "min":0 + }, + "RealtimeContactAnalysisSegment":{ + "type":"structure", + "members":{ + "Transcript":{ + "shape":"Transcript", + "documentation":"

The analyzed transcript.

" + }, + "Categories":{ + "shape":"Categories", + "documentation":"

The matched category rules.

" + } + }, + "documentation":"

An analyzed segment for a real-time analysis session.

" + }, + "RealtimeContactAnalysisSegments":{ + "type":"list", + "member":{"shape":"RealtimeContactAnalysisSegment"}, + "max":100, + "min":0 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The specified resource was not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SentimentValue":{ + "type":"string", + "enum":[ + "POSITIVE", + "NEUTRAL", + "NEGATIVE" + ] + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The throttling limit has been exceeded.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Transcript":{ + "type":"structure", + "required":[ + "Id", + "ParticipantId", + "ParticipantRole", + "Content", + "BeginOffsetMillis", + "EndOffsetMillis", + "Sentiment" + ], + "members":{ + "Id":{ + "shape":"TranscriptId", + "documentation":"

The identifier of the transcript.

" + }, + "ParticipantId":{ + "shape":"ParticipantId", + "documentation":"

The identifier of the participant.

" + }, + "ParticipantRole":{ + "shape":"ParticipantRole", + "documentation":"

The role of the participant. For example, whether it is a customer, agent, or system.

" + }, + "Content":{ + "shape":"TranscriptContent", + "documentation":"

The content of the transcript.

" + }, + "BeginOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

The beginning offset in the contact for this transcript.

" + }, + "EndOffsetMillis":{ + "shape":"OffsetMillis", + "documentation":"

The end offset in the contact for this transcript.

" + }, + "Sentiment":{ + "shape":"SentimentValue", + "documentation":"

The sentiment detected for this piece of the transcript.

" + }, + "IssuesDetected":{ + "shape":"IssuesDetected", + "documentation":"

List of positions where issues were detected on the transcript.

" + } + }, + "documentation":"

A list of messages in the session.

" + }, + "TranscriptContent":{ + "type":"string", + "min":1, + "pattern":".*\\S.*" + }, + "TranscriptId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*\\S.*" + } + }, + "documentation":"

Contact Lens for Amazon Connect enables you to analyze conversations between customers and agents by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts.

Contact Lens for Amazon Connect provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using Contact Lens in the Amazon Connect Administrator Guide.

" +} diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index ea00c89aa536..4b59dbab1542 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 69c656ecab2f..2c269f39a7ca 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index d5fa643c4052..65d8fda262a7 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/service-2.json b/services/costexplorer/src/main/resources/codegen-resources/service-2.json index 85db44f930c5..b4f10c9a1ee3 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/service-2.json +++ b/services/costexplorer/src/main/resources/codegen-resources/service-2.json @@ -170,7 +170,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"RequestChangedException"} ], - "documentation":"

Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, that you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Master account in an organization in AWS Organizations have access to all member accounts.

" + "documentation":"

Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. The management account in an organization in AWS Organizations has access to all member accounts.

For information about filter limitations, see Quotas and restrictions in the Billing and Cost Management User Guide.

" }, "GetCostAndUsageWithResources":{ "name":"GetCostAndUsageWithResources", @@ -187,7 +187,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"RequestChangedException"} ], - "documentation":"

Retrieves cost and usage metrics with resources for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, that you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Master account in an organization in AWS Organizations have access to all member accounts. This API is currently available for the Amazon Elastic Compute Cloud – Compute service only.

This is an opt-in only feature. You can enable this feature from the Cost Explorer Settings page. For information on how to access the Settings page, see Controlling Access for Cost Explorer in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Retrieves cost and usage metrics with resources for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. The management account in an organization in AWS Organizations has access to all member accounts. This API is currently available for the Amazon Elastic Compute Cloud – Compute service only.

This is an opt-in only feature. You can enable this feature from the Cost Explorer Settings page. For information on how to access the Settings page, see Controlling Access for Cost Explorer in the AWS Billing and Cost Management User Guide.

" }, "GetCostForecast":{ "name":"GetCostForecast", @@ -233,7 +233,7 @@ {"shape":"DataUnavailableException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves the reservation coverage for your account. This enables you to see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is covered by a reservation. An organization's master account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data about reservation usage by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DATABASE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • TAG

  • TENANCY

To determine valid values for a dimension, use the GetDimensionValues operation.

" + "documentation":"

Retrieves the reservation coverage for your account. This enables you to see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is covered by a reservation. An organization's management account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data about reservation usage by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DATABASE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • TAG

  • TENANCY

To determine valid values for a dimension, use the GetDimensionValues operation.

" }, "GetReservationPurchaseRecommendation":{ "name":"GetReservationPurchaseRecommendation", @@ -263,7 +263,7 @@ {"shape":"DataUnavailableException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves the reservation utilization for your account. Master account in an organization have access to member accounts. You can filter data by dimensions in a time period. You can use GetDimensionValues to determine the possible dimension values. Currently, you can group only by SUBSCRIPTION_ID.

" + "documentation":"

Retrieves the reservation utilization for your account. The management account in an organization has access to member accounts. You can filter data by dimensions in a time period. You can use GetDimensionValues to determine the possible dimension values. Currently, you can group only by SUBSCRIPTION_ID.

" }, "GetRightsizingRecommendation":{ "name":"GetRightsizingRecommendation", @@ -292,7 +292,7 @@ {"shape":"DataUnavailableException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s master account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions:

  • LINKED_ACCOUNT

  • REGION

  • SERVICE

  • INSTANCE_FAMILY

To determine valid values for a dimension, use the GetDimensionValues operation.

" + "documentation":"

Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s management account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions:

  • LINKED_ACCOUNT

  • REGION

  • SERVICE

  • INSTANCE_FAMILY

To determine valid values for a dimension, use the GetDimensionValues operation.

" }, "GetSavingsPlansPurchaseRecommendation":{ "name":"GetSavingsPlansPurchaseRecommendation", @@ -320,7 +320,7 @@ {"shape":"LimitExceededException"}, {"shape":"DataUnavailableException"} ], - "documentation":"

Retrieves the Savings Plans utilization for your account across date ranges with daily or monthly granularity. Master account in an organization have access to member accounts. You can use GetDimensionValues in SAVINGS_PLANS to determine the possible dimension values.

You cannot group by any dimension values for GetSavingsPlansUtilization.

" + "documentation":"

Retrieves the Savings Plans utilization for your account across date ranges with daily or monthly granularity. The management account in an organization has access to member accounts. You can use GetDimensionValues in SAVINGS_PLANS to determine the possible dimension values.

You cannot group by any dimension values for GetSavingsPlansUtilization.

" }, "GetSavingsPlansUtilizationDetails":{ "name":"GetSavingsPlansUtilizationDetails", @@ -1665,7 +1665,7 @@ }, "GroupBy":{ "shape":"GroupDefinitions", - "documentation":"

You can group Amazon Web Services costs using up to two different groups: either dimensions, tag keys, or both.

" + "documentation":"

You can group Amazon Web Services costs using up to two different groups, chosen from DIMENSION, TAG, and COST_CATEGORY.
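
As an illustration of the two-group limit, the sketch below asks for monthly unblended cost grouped by the SERVICE dimension and by a tag key. It uses the Cost Explorer classes generated from this model (CostExplorerClient, GetCostAndUsageRequest, GroupDefinition, GroupDefinitionType); the date range and the "Team" tag key are placeholders.

    import software.amazon.awssdk.services.costexplorer.CostExplorerClient;
    import software.amazon.awssdk.services.costexplorer.model.DateInterval;
    import software.amazon.awssdk.services.costexplorer.model.GetCostAndUsageRequest;
    import software.amazon.awssdk.services.costexplorer.model.GetCostAndUsageResponse;
    import software.amazon.awssdk.services.costexplorer.model.Granularity;
    import software.amazon.awssdk.services.costexplorer.model.GroupDefinition;
    import software.amazon.awssdk.services.costexplorer.model.GroupDefinitionType;

    public class CostGroupingSketch {
        public static void main(String[] args) {
            try (CostExplorerClient costExplorer = CostExplorerClient.create()) {
                GetCostAndUsageResponse response = costExplorer.getCostAndUsage(
                        GetCostAndUsageRequest.builder()
                                .timePeriod(DateInterval.builder()
                                        .start("2020-10-01").end("2020-11-01").build()) // placeholder range
                                .granularity(Granularity.MONTHLY)
                                .metrics("UnblendedCost")
                                // At most two groups: one DIMENSION group and one TAG group here.
                                .groupBy(GroupDefinition.builder()
                                                .type(GroupDefinitionType.DIMENSION).key("SERVICE").build(),
                                         GroupDefinition.builder()
                                                .type(GroupDefinitionType.TAG).key("Team").build())
                                .build());
                response.resultsByTime().forEach(result ->
                        result.groups().forEach(group ->
                                System.out.println(group.keys() + " -> "
                                        + group.metrics().get("UnblendedCost").amount())));
            }
        }
    }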

" }, "NextPageToken":{ "shape":"NextPageToken", @@ -1851,7 +1851,7 @@ }, "AccountScope":{ "shape":"AccountScope", - "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the master account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" + "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" }, "LookbackPeriodInDays":{ "shape":"LookbackPeriodInDays", @@ -2060,7 +2060,7 @@ }, "AccountScope":{ "shape":"AccountScope", - "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the master account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" + "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" }, "NextPageToken":{ "shape":"NextPageToken", @@ -3197,7 +3197,7 @@ "members":{ "AccountScope":{ "shape":"AccountScope", - "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the master account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" + "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" }, "SavingsPlansType":{ "shape":"SupportedSavingsPlansType", @@ -3310,6 +3310,10 @@ "GenerationTimestamp":{ "shape":"GenericString", "documentation":"

The timestamp showing when the recommendations were generated.

" + }, + "AdditionalMetadata":{ + "shape":"GenericString", + "documentation":"

Additional metadata that may be applicable to the recommendation.

" } }, "documentation":"

Metadata about your Savings Plans Purchase Recommendations.

" @@ -3466,7 +3470,7 @@ "documentation":"

The total amortized commitment for a Savings Plans. Includes the sum of the upfront and recurring Savings Plans fees.

" } }, - "documentation":"

A single daily or monthly Savings Plans utilization rate, and details for your account. A master account in an organization have access to member accounts. You can use GetDimensionValues to determine the possible dimension values.

" + "documentation":"

A single daily or monthly Savings Plans utilization rate, and details for your account. A management account in an organization has access to member accounts. You can use GetDimensionValues to determine the possible dimension values.

" }, "SavingsPlansUtilizationDetails":{ "type":"list", @@ -3522,7 +3526,7 @@ "type":"string", "max":302, "min":6, - "pattern":"(^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$)|(^arn:(aws[a-zA-Z-]*):sns:[a-zA-Z0-9-]+:[0-9]{12}:[a-zA-Z0-9_-]+$)" + "pattern":"(^[a-zA-Z0-9.!#$%&'*+=?^_‘{|}~-]+@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+)+$)|(^arn:(aws[a-zA-Z-]*):sns:[a-zA-Z0-9-]+:[0-9]{12}:[a-zA-Z0-9_-]+$)" }, "SubscriberStatus":{ "type":"string", diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml new file mode 100644 index 000000000000..83abda71c5d1 --- /dev/null +++ b/services/customerprofiles/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + customerprofiles + AWS Java SDK :: Services :: Customer Profiles + The AWS Java SDK for Customer Profiles module holds the client classes that are used for + communicating with Customer Profiles. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.customerprofiles + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..83ca3c5325a6 --- /dev/null +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2448 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-08-15", + "endpointPrefix":"profile", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Customer Profiles", + "serviceFullName":"Amazon Connect Customer Profiles", + "serviceId":"Customer Profiles", + "signatureVersion":"v4", + "signingName":"profile", + "uid":"customer-profiles-2020-08-15" + }, + "operations":{ + "AddProfileKey":{ + "name":"AddProfileKey", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/keys" + }, + "input":{"shape":"AddProfileKeyRequest"}, + "output":{"shape":"AddProfileKeyResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Associates a new key value with a specific profile, such as a Contact Trace Record (CTR) ContactId.

A profile object can have a single unique key and any number of additional keys that can be used to identify the profile that it belongs to.
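
A minimal sketch of adding a CTR ContactId as a profile key with the AWS SDK for Java v2, assuming the standard generated client for this new service (CustomerProfilesClient) and the request/response members of the full model (domainName, profileId, keyName, values), which are not shown in this excerpt; all values are placeholders.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.AddProfileKeyRequest;
    import software.amazon.awssdk.services.customerprofiles.model.AddProfileKeyResponse;

    public class AddProfileKeySketch {
        public static void main(String[] args) {
            try (CustomerProfilesClient profiles = CustomerProfilesClient.create()) {
                // Attach a CTR ContactId to an existing profile so it can be looked up by that key later.
                AddProfileKeyResponse response = profiles.addProfileKey(
                        AddProfileKeyRequest.builder()
                                .domainName("my-profiles-domain")                 // placeholder domain
                                .profileId("1af57a1c8e4e4e59a1e9a8c7d9d51a29")    // placeholder profile ID
                                .keyName("_ctrContactId")                         // placeholder key name
                                .values("22222222-3333-4444-5555-666666666666")   // the CTR ContactId
                                .build());
                System.out.println("Added key " + response.keyName() + " -> " + response.values());
            }
        }
    }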

" + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{"shape":"CreateDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.
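
A minimal sketch of creating such a domain with the AWS SDK for Java v2, again assuming the standard generated client; the domain name is a placeholder and defaultExpirationDays is assumed from the full CreateDomain request shape, which is not shown in this excerpt.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.CreateDomainRequest;
    import software.amazon.awssdk.services.customerprofiles.model.CreateDomainResponse;

    public class CreateDomainSketch {
        public static void main(String[] args) {
            try (CustomerProfilesClient profiles = CustomerProfilesClient.create()) {
                CreateDomainResponse response = profiles.createDomain(
                        CreateDomainRequest.builder()
                                .domainName("my-profiles-domain") // placeholder; the container for all profile data
                                .defaultExpirationDays(366)       // assumed member from the full model
                                .build());
                System.out.println("Created domain " + response.domainName());
            }
        }
    }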

" + }, + "CreateProfile":{ + "name":"CreateProfile", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles" + }, + "input":{"shape":"CreateProfileRequest"}, + "output":{"shape":"CreateProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a standard profile.

A standard profile represents the following attributes for a customer profile in a domain.

" + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"DeleteDomainRequest"}, + "output":{"shape":"DeleteDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a specific domain and all of its customer data, such as customer profile attributes and their related objects.

" + }, + "DeleteIntegration":{ + "name":"DeleteIntegration", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/integrations/delete" + }, + "input":{"shape":"DeleteIntegrationRequest"}, + "output":{"shape":"DeleteIntegrationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Removes an integration from a specific domain.

" + }, + "DeleteProfile":{ + "name":"DeleteProfile", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/delete" + }, + "input":{"shape":"DeleteProfileRequest"}, + "output":{"shape":"DeleteProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes the standard customer profile and all data pertaining to the profile.

" + }, + "DeleteProfileKey":{ + "name":"DeleteProfileKey", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/keys/delete" + }, + "input":{"shape":"DeleteProfileKeyRequest"}, + "output":{"shape":"DeleteProfileKeyResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Removes a searchable key from a customer profile.

" + }, + "DeleteProfileObject":{ + "name":"DeleteProfileObject", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/objects/delete" + }, + "input":{"shape":"DeleteProfileObjectRequest"}, + "output":{"shape":"DeleteProfileObjectResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Removes an object associated with a profile of a given ProfileObjectType.

" + }, + "DeleteProfileObjectType":{ + "name":"DeleteProfileObjectType", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}/object-types/{ObjectTypeName}" + }, + "input":{"shape":"DeleteProfileObjectTypeRequest"}, + "output":{"shape":"DeleteProfileObjectTypeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Removes a ProfileObjectType from a specific domain and deletes all the ProfileObjects of that type. It also disables integrations that use this ProfileObjectType. In addition, it scrubs all of the fields of the standard profile that were populated from this ProfileObjectType.

" + }, + "GetDomain":{ + "name":"GetDomain", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"GetDomainRequest"}, + "output":{"shape":"GetDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns information about a specific domain.

" + }, + "GetIntegration":{ + "name":"GetIntegration", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/integrations" + }, + "input":{"shape":"GetIntegrationRequest"}, + "output":{"shape":"GetIntegrationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns an integration for a domain.

" + }, + "GetProfileObjectType":{ + "name":"GetProfileObjectType", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/object-types/{ObjectTypeName}" + }, + "input":{"shape":"GetProfileObjectTypeRequest"}, + "output":{"shape":"GetProfileObjectTypeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns the object type for a specific domain.

" + }, + "GetProfileObjectTypeTemplate":{ + "name":"GetProfileObjectTypeTemplate", + "http":{ + "method":"GET", + "requestUri":"/templates/{TemplateId}" + }, + "input":{"shape":"GetProfileObjectTypeTemplateRequest"}, + "output":{"shape":"GetProfileObjectTypeTemplateResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns the template information for a specific object type.

A template is a predefined ProfileObjectType, such as “Salesforce-Account” or “Salesforce-Contact.” When a user sends a ProfileObject, using the PutProfileObject API, with an ObjectTypeName that matches one of the TemplateIds, it uses the mappings from the template.

" + }, + "ListAccountIntegrations":{ + "name":"ListAccountIntegrations", + "http":{ + "method":"POST", + "requestUri":"/integrations" + }, + "input":{"shape":"ListAccountIntegrationsRequest"}, + "output":{"shape":"ListAccountIntegrationsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all of the integrations associated with a specific URI in the AWS account.

" + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"GET", + "requestUri":"/domains" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a list of all the domains that have been created for an AWS account.
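Since results are paginated through NextToken and MaxResults, a caller typically loops until NextToken comes back null. A sketch, again assuming the standard generated client:

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.ListDomainsRequest;
    import software.amazon.awssdk.services.customerprofiles.model.ListDomainsResponse;

    public class ListDomainsExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                String nextToken = null;
                do {
                    ListDomainsResponse page = client.listDomains(ListDomainsRequest.builder()
                            .maxResults(25)           // objects returned per page
                            .nextToken(nextToken)     // null on the first call
                            .build());
                    page.items().forEach(d -> System.out.println(d.domainName()));
                    nextToken = page.nextToken();
                } while (nextToken != null);
            }
        }
    }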

" + }, + "ListIntegrations":{ + "name":"ListIntegrations", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/integrations" + }, + "input":{"shape":"ListIntegrationsRequest"}, + "output":{"shape":"ListIntegrationsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all of the integrations in your domain.

" + }, + "ListProfileObjectTypeTemplates":{ + "name":"ListProfileObjectTypeTemplates", + "http":{ + "method":"GET", + "requestUri":"/templates" + }, + "input":{"shape":"ListProfileObjectTypeTemplatesRequest"}, + "output":{"shape":"ListProfileObjectTypeTemplatesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all of the template information for object types.

" + }, + "ListProfileObjectTypes":{ + "name":"ListProfileObjectTypes", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/object-types" + }, + "input":{"shape":"ListProfileObjectTypesRequest"}, + "output":{"shape":"ListProfileObjectTypesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all of the object types for a specific domain.

" + }, + "ListProfileObjects":{ + "name":"ListProfileObjects", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/objects" + }, + "input":{"shape":"ListProfileObjectsRequest"}, + "output":{"shape":"ListProfileObjectsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a list of objects associated with a profile of a given ProfileObjectType.
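A sketch of listing the objects attached to one profile; the domain, object type name, and profile id are hypothetical.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.ListProfileObjectsRequest;

    public class ListProfileObjectsExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                client.listProfileObjects(ListProfileObjectsRequest.builder()
                        .domainName("example-domain")
                        .objectTypeName("CTR")                                  // hypothetical object type
                        .profileId("12345678-1234-1234-1234-123456789012")
                        .build())
                        .items()
                        // Each item carries the service-generated unique key plus the raw JSON object.
                        .forEach(o -> System.out.println(o.profileObjectUniqueKey() + ": " + o.object()));
            }
        }
    }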

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Displays the tags associated with an Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.

" + }, + "PutIntegration":{ + "name":"PutIntegration", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/integrations" + }, + "input":{"shape":"PutIntegrationRequest"}, + "output":{"shape":"PutIntegrationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Adds an integration between the service and a third-party service. Supported integrations include Amazon AppFlow and Amazon Connect.

An integration can belong to only one domain.
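A sketch of registering an integration, using the assumed generated client; the URI below is a made-up Amazon AppFlow flow ARN and the object type name is illustrative.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.PutIntegrationRequest;
    import software.amazon.awssdk.services.customerprofiles.model.PutIntegrationResponse;

    public class PutIntegrationExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                PutIntegrationResponse integration = client.putIntegration(PutIntegrationRequest.builder()
                        .domainName("example-domain")
                        .uri("arn:aws:appflow:us-east-1:123456789012:flow/example-flow")  // hypothetical source URI
                        .objectTypeName("Salesforce-Account")    // incoming objects are ingested as this type
                        .build());
                System.out.println("Integration added for " + integration.uri());
            }
        }
    }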

" + }, + "PutProfileObject":{ + "name":"PutProfileObject", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/profiles/objects" + }, + "input":{"shape":"PutProfileObjectRequest"}, + "output":{"shape":"PutProfileObjectResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Adds additional objects of a given ObjectType to customer profiles.

When adding a specific profile object, such as a Contact Trace Record (CTR), an inferred profile can be created if the object is not mapped to an existing profile. The resulting profile will only have a phone number populated in the standard ProfileObject. Any additional CTRs with the same phone number will be mapped to the same inferred profile.

When a ProfileObject is created, if a ProfileObjectType already exists for that ProfileObject, the object provides data to the standard profile according to the ProfileObjectType definition.

PutProfileObject needs an ObjectType, which can be created using PutProfileObjectType.
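A sketch of ingesting one object, with the same assumed client; the object type and JSON payload are illustrative.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.PutProfileObjectRequest;

    public class PutProfileObjectExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                // Object is a string of serialized JSON; its fields are matched against the
                // ObjectType's field mappings.
                String json = "{\"ContactId\":\"abc-123\",\"PhoneNumber\":\"+15555550100\"}";
                String uniqueKey = client.putProfileObject(PutProfileObjectRequest.builder()
                        .domainName("example-domain")
                        .objectTypeName("CTR")      // hypothetical type defined via PutProfileObjectType
                        .object(json)
                        .build())
                        .profileObjectUniqueKey();
                System.out.println("Stored object " + uniqueKey);
            }
        }
    }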

" + }, + "PutProfileObjectType":{ + "name":"PutProfileObjectType", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/object-types/{ObjectTypeName}" + }, + "input":{"shape":"PutProfileObjectTypeRequest"}, + "output":{"shape":"PutProfileObjectTypeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Defines a ProfileObjectType.
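A sketch of defining an object type from one of the predefined templates; the “Salesforce-Account” template id comes from the template description above, and the remaining values are placeholders.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.PutProfileObjectTypeRequest;

    public class PutProfileObjectTypeExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                client.putProfileObjectType(PutProfileObjectTypeRequest.builder()
                        .domainName("example-domain")
                        .objectTypeName("Salesforce-Account")
                        .description("Accounts synced from Salesforce")
                        .templateId("Salesforce-Account")     // reuse the template's field and key mappings
                        .allowProfileCreation(true)           // create a profile when no match is found
                        .build());
            }
        }
    }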

" + }, + "SearchProfiles":{ + "name":"SearchProfiles", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/profiles/search" + }, + "input":{"shape":"SearchProfilesRequest"}, + "output":{"shape":"SearchProfilesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Searches for profiles within a specific domain using a name, phone number, email address, account number, or a custom-defined index.
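A sketch of searching by one of the predefined keys, with the same assumed client; the phone number is a placeholder.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.SearchProfilesRequest;

    public class SearchProfilesExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                client.searchProfiles(SearchProfilesRequest.builder()
                        .domainName("example-domain")
                        .keyName("_phone")              // one of the predefined search keys
                        .values("+15555550100")         // value(s) to match against that key
                        .maxResults(20)
                        .build())
                        .items()
                        .forEach(p -> System.out.println(p.profileId() + " " + p.firstName() + " " + p.lastName()));
            }
        }
    }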

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Assigns one or more tags (key-value pairs) to the specified Amazon Connect Customer Profiles resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.
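A sketch of tagging a domain; the ARN path after the arn:aws:profile prefix is hypothetical.

    import java.util.Map;
    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.TagResourceRequest;

    public class TagResourceExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                client.tagResource(TagResourceRequest.builder()
                        .resourceArn("arn:aws:profile:us-east-1:123456789012:domains/example-domain")
                        .tags(Map.of("Team", "CRM", "Stage", "prod"))  // an existing key gets its value replaced
                        .build());
            }
        }
    }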

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes one or more tags from the specified Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.

" + }, + "UpdateDomain":{ + "name":"UpdateDomain", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}" + }, + "input":{"shape":"UpdateDomainRequest"}, + "output":{"shape":"UpdateDomainResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

Once a domain is created, the name can’t be changed.

" + }, + "UpdateProfile":{ + "name":"UpdateProfile", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/profiles" + }, + "input":{"shape":"UpdateProfileRequest"}, + "output":{"shape":"UpdateProfileResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates the properties of a profile. The ProfileId is required for updating a customer profile.

When calling the UpdateProfile API, specifying an empty string for a field removes any existing value for that field. Omitting a field leaves its current value unchanged.
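A sketch of a partial update, assuming UpdateProfileRequest exposes the same attribute setters as CreateProfileRequest plus the required profile id; all values are placeholders.

    import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
    import software.amazon.awssdk.services.customerprofiles.model.UpdateProfileRequest;

    public class UpdateProfileExample {
        public static void main(String[] args) {
            try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
                client.updateProfile(UpdateProfileRequest.builder()
                        .domainName("example-domain")
                        .profileId("12345678-1234-1234-1234-123456789012")  // required
                        .mobilePhoneNumber("+15555550199")                  // overwrite this attribute
                        .additionalInformation("")                          // empty string removes the stored value
                        .build());                                          // omitted fields stay unchanged
            }
        }
    }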

" + } + }, + "shapes":{ + "name":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9_-]+$" + }, + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AddProfileKeyRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "KeyName", + "Values", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + }, + "KeyName":{ + "shape":"name", + "documentation":"

A searchable identifier of a customer profile.

" + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

A list of key values.

" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "AddProfileKeyResponse":{ + "type":"structure", + "members":{ + "KeyName":{ + "shape":"name", + "documentation":"

A searchable identifier of a customer profile.

" + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

A list of key values.

" + } + } + }, + "Address":{ + "type":"structure", + "members":{ + "Address1":{ + "shape":"string1To255", + "documentation":"

The first line of a customer address.

" + }, + "Address2":{ + "shape":"string1To255", + "documentation":"

The second line of a customer address.

" + }, + "Address3":{ + "shape":"string1To255", + "documentation":"

The third line of a customer address.

" + }, + "Address4":{ + "shape":"string1To255", + "documentation":"

The fourth line of a customer address.

" + }, + "City":{ + "shape":"string1To255", + "documentation":"

The city in which a customer lives.

" + }, + "County":{ + "shape":"string1To255", + "documentation":"

The county in which a customer lives.

" + }, + "State":{ + "shape":"string1To255", + "documentation":"

The state in which a customer lives.

" + }, + "Province":{ + "shape":"string1To255", + "documentation":"

The province in which a customer lives.

" + }, + "Country":{ + "shape":"string1To255", + "documentation":"

The country in which a customer lives.

" + }, + "PostalCode":{ + "shape":"string1To255", + "documentation":"

The postal code of a customer address.

" + } + }, + "documentation":"

A generic address associated with the customer that is not mailing, shipping, or billing.

" + }, + "Attributes":{ + "type":"map", + "key":{"shape":"string1To255"}, + "value":{"shape":"string1To255"} + }, + "BadRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

The input you provided is invalid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "CreateDomainRequest":{ + "type":"structure", + "required":[ + "DomainName", + "DefaultExpirationDays" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The default number of days until the data within the domain expires.

" + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

" + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications. You must set up a policy on the DeadLetterQueue for the SendMessage operation to enable Amazon Connect Customer Profiles to send messages to the DeadLetterQueue.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "CreateDomainResponse":{ + "type":"structure", + "required":[ + "DomainName", + "DefaultExpirationDays", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The default number of days until the data within the domain expires.

" + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

" + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "CreateProfileRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "AccountNumber":{ + "shape":"string1To255", + "documentation":"

A unique account number that you have given to the customer.

" + }, + "AdditionalInformation":{ + "shape":"string1To1000", + "documentation":"

Any additional information relevant to the customer's profile.

" + }, + "PartyType":{ + "shape":"PartyType", + "documentation":"

The type of profile used to describe the customer.

" + }, + "BusinessName":{ + "shape":"string1To255", + "documentation":"

The name of the customer’s business.

" + }, + "FirstName":{ + "shape":"string1To255", + "documentation":"

The customer’s first name.

" + }, + "MiddleName":{ + "shape":"string1To255", + "documentation":"

The customer’s middle name.

" + }, + "LastName":{ + "shape":"string1To255", + "documentation":"

The customer’s last name.

" + }, + "BirthDate":{ + "shape":"string1To255", + "documentation":"

The customer’s birth date.

" + }, + "Gender":{ + "shape":"Gender", + "documentation":"

The gender with which the customer identifies.

" + }, + "PhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer's phone number, which has not been specified as a mobile, home, or business number.

" + }, + "MobilePhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer’s mobile phone number.

" + }, + "HomePhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer’s home phone number.

" + }, + "BusinessPhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer’s business phone number.

" + }, + "EmailAddress":{ + "shape":"string1To255", + "documentation":"

The customer's email address, which has not been specified as a personal or business address.

" + }, + "PersonalEmailAddress":{ + "shape":"string1To255", + "documentation":"

The customer’s personal email address.

" + }, + "BusinessEmailAddress":{ + "shape":"string1To255", + "documentation":"

The customer’s business email address.

" + }, + "Address":{ + "shape":"Address", + "documentation":"

A generic address associated with the customer that is not mailing, shipping, or billing.

" + }, + "ShippingAddress":{ + "shape":"Address", + "documentation":"

The customer’s shipping address.

" + }, + "MailingAddress":{ + "shape":"Address", + "documentation":"

The customer’s mailing address.

" + }, + "BillingAddress":{ + "shape":"Address", + "documentation":"

The customer’s billing address.

" + }, + "Attributes":{ + "shape":"Attributes", + "documentation":"

A key value pair of attributes of a customer profile.

" + } + } + }, + "CreateProfileResponse":{ + "type":"structure", + "required":["ProfileId"], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + } + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteDomainResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

A message that indicates the delete request is done.

" + } + } + }, + "DeleteIntegrationRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

The URI of the S3 bucket or any other type of data source.

" + } + } + }, + "DeleteIntegrationResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

A message that indicates the delete request is done.

" + } + } + }, + "DeleteProfileKeyRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "KeyName", + "Values", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + }, + "KeyName":{ + "shape":"name", + "documentation":"

A searchable identifier of a customer profile.

" + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

A list of key values.

" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteProfileKeyResponse":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"message", + "documentation":"

A message that indicates the delete request is done.

" + } + } + }, + "DeleteProfileObjectRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "ProfileObjectUniqueKey", + "ObjectTypeName", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + }, + "ProfileObjectUniqueKey":{ + "shape":"string1To255", + "documentation":"

The unique identifier of the profile object generated by the service.

" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteProfileObjectResponse":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"message", + "documentation":"

A message that indicates the delete request is done.

" + } + } + }, + "DeleteProfileObjectTypeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

", + "location":"uri", + "locationName":"ObjectTypeName" + } + } + }, + "DeleteProfileObjectTypeResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

A message that indicates the delete request is done.

" + } + } + }, + "DeleteProfileRequest":{ + "type":"structure", + "required":[ + "ProfileId", + "DomainName" + ], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "DeleteProfileResponse":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"message", + "documentation":"

A message that indicates the delete request is done.

" + } + } + }, + "DomainList":{ + "type":"list", + "member":{"shape":"ListDomainItem"} + }, + "DomainStats":{ + "type":"structure", + "members":{ + "ProfileCount":{ + "shape":"long", + "documentation":"

The total number of profiles currently in the domain.

" + }, + "MeteringProfileCount":{ + "shape":"long", + "documentation":"

The number of profiles that you are currently paying for in the domain. If you have more than 100 objects associated with a single profile, that profile counts as two profiles. If you have more than 200 objects, that profile counts as three, and so on.

" + }, + "ObjectCount":{ + "shape":"long", + "documentation":"

The total number of objects in the domain.

" + }, + "TotalSize":{ + "shape":"long", + "documentation":"

The total size, in bytes, of all objects in the domain.

" + } + }, + "documentation":"

Usage-specific statistics about the domain.

" + }, + "FieldContentType":{ + "type":"string", + "enum":[ + "STRING", + "NUMBER", + "PHONE_NUMBER", + "EMAIL_ADDRESS", + "NAME" + ] + }, + "FieldMap":{ + "type":"map", + "key":{"shape":"name"}, + "value":{"shape":"ObjectTypeField"} + }, + "FieldNameList":{ + "type":"list", + "member":{"shape":"name"} + }, + "Gender":{ + "type":"string", + "enum":[ + "MALE", + "FEMALE", + "UNSPECIFIED" + ] + }, + "GetDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

A unique name for the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "GetDomainResponse":{ + "type":"structure", + "required":[ + "DomainName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The default number of days until the data within the domain expires.

" + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

" + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

" + }, + "Stats":{ + "shape":"DomainStats", + "documentation":"

Usage-specific statistics about the domain.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "GetIntegrationRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

The URI of the S3 bucket or any other type of data source.

" + } + } + }, + "GetIntegrationResponse":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

The URI of the S3 bucket or any other type of data source.

" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "GetProfileObjectTypeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

", + "location":"uri", + "locationName":"ObjectTypeName" + } + } + }, + "GetProfileObjectTypeResponse":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Description" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "Description":{ + "shape":"text", + "documentation":"

The description of the profile object type.

" + }, + "TemplateId":{ + "shape":"name", + "documentation":"

A unique identifier for the object template.

" + }, + "ExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The number of days until the data in the object expires.

" + }, + "EncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The customer-provided key to encrypt the profile object that will be created in this profile object type.

" + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

A map of the name and ObjectType field.

" + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

A list of unique keys that can be used to map data to the profile.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "GetProfileObjectTypeTemplateRequest":{ + "type":"structure", + "required":["TemplateId"], + "members":{ + "TemplateId":{ + "shape":"name", + "documentation":"

A unique identifier for the object template.

", + "location":"uri", + "locationName":"TemplateId" + } + } + }, + "GetProfileObjectTypeTemplateResponse":{ + "type":"structure", + "members":{ + "TemplateId":{ + "shape":"name", + "documentation":"

A unique identifier for the object template.

" + }, + "SourceName":{ + "shape":"name", + "documentation":"

The name of the source of the object template.

" + }, + "SourceObject":{ + "shape":"name", + "documentation":"

The source of the object template.

" + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

A map of the name and ObjectType field.

" + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

A list of unique keys that can be used to map data to the profile.

" + } + } + }, + "IntegrationList":{ + "type":"list", + "member":{"shape":"ListIntegrationItem"} + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

An internal service error occurred.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KeyMap":{ + "type":"map", + "key":{"shape":"name"}, + "value":{"shape":"ObjectTypeKeyList"} + }, + "ListAccountIntegrationsRequest":{ + "type":"structure", + "required":["Uri"], + "members":{ + "Uri":{ + "shape":"string1To255", + "documentation":"

The URI of the S3 bucket or any other type of data source.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListAccountIntegrations API call.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListAccountIntegrationsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"IntegrationList", + "documentation":"

The list of ListAccountIntegration instances.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListAccountIntegrations API call.

" + } + } + }, + "ListDomainItem":{ + "type":"structure", + "required":[ + "DomainName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

An object in a list that represents a domain.

" + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListDomain API call.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDomainsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"DomainList", + "documentation":"

The list of ListDomains instances.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListDomains API call.

" + } + } + }, + "ListIntegrationItem":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

The URI of the S3 bucket or any other type of data source.

" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

An integration in a list of integrations.

" + }, + "ListIntegrationsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListIntegrations API call.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListIntegrationsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"IntegrationList", + "documentation":"

The list of ListIntegrations instances.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListIntegrations API call.

" + } + } + }, + "ListProfileObjectTypeItem":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Description" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "Description":{ + "shape":"text", + "documentation":"

Description of the profile object type.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

A ProfileObjectType instance.

" + }, + "ListProfileObjectTypeTemplateItem":{ + "type":"structure", + "members":{ + "TemplateId":{ + "shape":"name", + "documentation":"

A unique identifier for the object template.

" + }, + "SourceName":{ + "shape":"name", + "documentation":"

The name of the source of the object template.

" + }, + "SourceObject":{ + "shape":"name", + "documentation":"

The source of the object template.

" + } + }, + "documentation":"

A ProfileObjectTypeTemplate in a list of ProfileObjectTypeTemplates.

" + }, + "ListProfileObjectTypeTemplatesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListObjectTypeTemplates API call.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListProfileObjectTypeTemplatesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileObjectTypeTemplateList", + "documentation":"

The list of ListProfileObjectType template instances.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListObjectTypeTemplates API call.

" + } + } + }, + "ListProfileObjectTypesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

Identifies the next page of results to return.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListProfileObjectTypesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileObjectTypeList", + "documentation":"

The list of ListProfileObjectTypes instances.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, + "ListProfileObjectsItem":{ + "type":"structure", + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

Specifies the kind of object being added to a profile, such as \"Salesforce-Account.\"

" + }, + "ProfileObjectUniqueKey":{ + "shape":"string1To255", + "documentation":"

The unique identifier of the ProfileObject generated by the service.

" + }, + "Object":{ + "shape":"stringifiedJson", + "documentation":"

A JSON representation of a ProfileObject that belongs to a profile.

" + } + }, + "documentation":"

A ProfileObject in a list of ProfileObjects.

" + }, + "ListProfileObjectsRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName", + "ProfileId" + ], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous call to ListProfileObjects.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + } + } + }, + "ListProfileObjectsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileObjectList", + "documentation":"

The list of ListProfileObject instances.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous call to ListProfileObjects.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

The ARN of the resource for which you want to view tags.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "ObjectTypeField":{ + "type":"structure", + "members":{ + "Source":{ + "shape":"text", + "documentation":"

A field of a ProfileObject. For example: _source.FirstName, where “_source” is a ProfileObjectType of a Zendesk user and “FirstName” is a field in that ObjectType.

" + }, + "Target":{ + "shape":"text", + "documentation":"

The location of the data in the standard ProfileObject model. For example: _profile.Address.PostalCode.

" + }, + "ContentType":{ + "shape":"FieldContentType", + "documentation":"

The content type of the field. Used for determining equality when searching.

" + } + }, + "documentation":"

Represents a field in a ProfileObjectType.

" + }, + "ObjectTypeKey":{ + "type":"structure", + "members":{ + "StandardIdentifiers":{ + "shape":"StandardIdentifierList", + "documentation":"

The types of keys that a ProfileObject can have. Each ProfileObject can have only 1 UNIQUE key but multiple PROFILE keys. PROFILE means that this key can be used to tie an object to a PROFILE. UNIQUE means that it can be used to uniquely identify an object. If a key is marked as SECONDARY, it will be used to search for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is not persisted for searching the profile. A NEW_ONLY key is only used if the profile does not already exist before the object is ingested, otherwise it is only used for matching objects to profiles.

" + }, + "FieldNames":{ + "shape":"FieldNameList", + "documentation":"

The reference for the key name of the fields map.

" + } + }, + "documentation":"

An object that defines the Key element of a ProfileObject. A Key is a special element that can be used to search for a customer profile.

" + }, + "ObjectTypeKeyList":{ + "type":"list", + "member":{"shape":"ObjectTypeKey"} + }, + "PartyType":{ + "type":"string", + "enum":[ + "INDIVIDUAL", + "BUSINESS", + "OTHER" + ] + }, + "Profile":{ + "type":"structure", + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + }, + "AccountNumber":{ + "shape":"string1To255", + "documentation":"

A unique account number that you have given to the customer.

" + }, + "AdditionalInformation":{ + "shape":"string1To1000", + "documentation":"

Any additional information relevant to the customer's profile.

" + }, + "PartyType":{ + "shape":"PartyType", + "documentation":"

The type of profile used to describe the customer.

" + }, + "BusinessName":{ + "shape":"string1To255", + "documentation":"

The name of the customer’s business.

" + }, + "FirstName":{ + "shape":"string1To255", + "documentation":"

The customer’s first name.

" + }, + "MiddleName":{ + "shape":"string1To255", + "documentation":"

The customer’s middle name.

" + }, + "LastName":{ + "shape":"string1To255", + "documentation":"

The customer’s last name.

" + }, + "BirthDate":{ + "shape":"string1To255", + "documentation":"

The customer’s birth date.

" + }, + "Gender":{ + "shape":"Gender", + "documentation":"

The gender with which the customer identifies.

" + }, + "PhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer's phone number, which has not been specified as a mobile, home, or business number.

" + }, + "MobilePhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer’s mobile phone number.

" + }, + "HomePhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer’s home phone number.

" + }, + "BusinessPhoneNumber":{ + "shape":"string1To255", + "documentation":"

The customer’s business phone number.

" + }, + "EmailAddress":{ + "shape":"string1To255", + "documentation":"

The customer's email address, which has not been specified as a personal or business address.

" + }, + "PersonalEmailAddress":{ + "shape":"string1To255", + "documentation":"

The customer’s personal email address.

" + }, + "BusinessEmailAddress":{ + "shape":"string1To255", + "documentation":"

The customer’s business email address.

" + }, + "Address":{ + "shape":"Address", + "documentation":"

A generic address associated with the customer that is not mailing, shipping, or billing.

" + }, + "ShippingAddress":{ + "shape":"Address", + "documentation":"

The customer’s shipping address.

" + }, + "MailingAddress":{ + "shape":"Address", + "documentation":"

The customer’s mailing address.

" + }, + "BillingAddress":{ + "shape":"Address", + "documentation":"

The customer’s billing address.

" + }, + "Attributes":{ + "shape":"Attributes", + "documentation":"

A key value pair of attributes of a customer profile.

" + } + }, + "documentation":"

The standard profile of a customer.

" + }, + "ProfileList":{ + "type":"list", + "member":{"shape":"Profile"} + }, + "ProfileObjectList":{ + "type":"list", + "member":{"shape":"ListProfileObjectsItem"} + }, + "ProfileObjectTypeList":{ + "type":"list", + "member":{"shape":"ListProfileObjectTypeItem"} + }, + "ProfileObjectTypeTemplateList":{ + "type":"list", + "member":{"shape":"ListProfileObjectTypeTemplateItem"} + }, + "PutIntegrationRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

The URI of the S3 bucket or any other type of data source.

" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "PutIntegrationResponse":{ + "type":"structure", + "required":[ + "DomainName", + "Uri", + "ObjectTypeName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "Uri":{ + "shape":"string1To255", + "documentation":"

The URI of the S3 bucket or any other type of data source.

" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "PutProfileObjectRequest":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Object", + "DomainName" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "Object":{ + "shape":"stringifiedJson", + "documentation":"

A string that is serialized from a JSON object.

" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "PutProfileObjectResponse":{ + "type":"structure", + "members":{ + "ProfileObjectUniqueKey":{ + "shape":"string1To255", + "documentation":"

The unique identifier of the profile object generated by the service.

" + } + } + }, + "PutProfileObjectTypeRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ObjectTypeName", + "Description" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

", + "location":"uri", + "locationName":"ObjectTypeName" + }, + "Description":{ + "shape":"text", + "documentation":"

Description of the profile object type.

" + }, + "TemplateId":{ + "shape":"name", + "documentation":"

A unique identifier for the object template.

" + }, + "ExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The number of days until the data in the object expires.

" + }, + "EncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The customer-provided key to encrypt the profile object that will be created in this profile object type.

" + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

A map of the name and ObjectType field.

" + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

A list of unique keys that can be used to map data to the profile.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "PutProfileObjectTypeResponse":{ + "type":"structure", + "required":[ + "ObjectTypeName", + "Description" + ], + "members":{ + "ObjectTypeName":{ + "shape":"typeName", + "documentation":"

The name of the profile object type.

" + }, + "Description":{ + "shape":"text", + "documentation":"

Description of the profile object type.

" + }, + "TemplateId":{ + "shape":"name", + "documentation":"

A unique identifier for the object template.

" + }, + "ExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The number of days until the data in the object expires.

" + }, + "EncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The customer-provided key to encrypt the profile object that will be created in this profile object type.

" + }, + "AllowProfileCreation":{ + "shape":"boolean", + "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" + }, + "Fields":{ + "shape":"FieldMap", + "documentation":"

A map of the name and ObjectType field.

" + }, + "Keys":{ + "shape":"KeyMap", + "documentation":"

A list of unique keys that can be used to map data to the profile.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

The requested resource does not exist, or access was denied.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SearchProfilesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "KeyName", + "Values" + ], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous SearchProfiles API call.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "KeyName":{ + "shape":"name", + "documentation":"

A searchable identifier of a customer profile. The predefined keys you can use to search include: _account, _profileId, _fullName, _phone, _email, _ctrContactId, _marketoLeadId, _salesforceAccountId, _salesforceContactId, _zendeskUserId, _zendeskExternalId, _serviceNowSystemId.

" + }, + "Values":{ + "shape":"requestValueList", + "documentation":"

A list of key values.

" + } + } + }, + "SearchProfilesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ProfileList", + "documentation":"

The list of SearchProfiles instances.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous SearchProfiles API call.

" + } + } + }, + "StandardIdentifier":{ + "type":"string", + "enum":[ + "PROFILE", + "UNIQUE", + "SECONDARY", + "LOOKUP_ONLY", + "NEW_ONLY" + ] + }, + "StandardIdentifierList":{ + "type":"list", + "member":{"shape":"StandardIdentifier"} + }, + "TagArn":{ + "type":"string", + "max":256, + "pattern":"^arn:[a-z0-9]{1,10}:profile" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

The ARN of the resource that you're adding tags to.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"message"} + }, + "documentation":"

You exceeded the maximum number of requests.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"TagArn", + "documentation":"

The ARN of the resource from which you are removing tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAddress":{ + "type":"structure", + "members":{ + "Address1":{ + "shape":"string0To255", + "documentation":"

The first line of a customer address.

" + }, + "Address2":{ + "shape":"string0To255", + "documentation":"

The second line of a customer address.

" + }, + "Address3":{ + "shape":"string0To255", + "documentation":"

The third line of a customer address.

" + }, + "Address4":{ + "shape":"string0To255", + "documentation":"

The fourth line of a customer address.

" + }, + "City":{ + "shape":"string0To255", + "documentation":"

The city in which a customer lives.

" + }, + "County":{ + "shape":"string0To255", + "documentation":"

The county in which a customer lives.

" + }, + "State":{ + "shape":"string0To255", + "documentation":"

The state in which a customer lives.

" + }, + "Province":{ + "shape":"string0To255", + "documentation":"

The province in which a customer lives.

" + }, + "Country":{ + "shape":"string0To255", + "documentation":"

The country in which a customer lives.

" + }, + "PostalCode":{ + "shape":"string0To255", + "documentation":"

The postal code of a customer address.

" + } + }, + "documentation":"

Updates associated with the address properties of a customer profile.

" + }, + "UpdateAttributes":{ + "type":"map", + "key":{"shape":"string1To255"}, + "value":{"shape":"string0To255"} + }, + "UpdateDomainRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name for the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The default number of days until the data within the domain expires.

" + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage. If specified as an empty string, it will clear any existing value.

" + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications. If specified as an empty string, it will clear any existing value. You must set up a policy on the DeadLetterQueue for the SendMessage operation to enable Amazon Connect Customer Profiles to send messages to the DeadLetterQueue.
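A minimal sketch of pointing a domain at an SQS dead-letter queue, and of clearing it again with an empty string as described above. It assumes the generated CustomerProfilesClient/UpdateDomainRequest classes and a pre-existing queue whose policy already allows Customer Profiles to call sqs:SendMessage; the domain name and queue URL are placeholders.

import software.amazon.awssdk.services.customerprofiles.CustomerProfilesClient;
import software.amazon.awssdk.services.customerprofiles.model.UpdateDomainRequest;

public class UpdateDomainDlq {
    public static void main(String[] args) {
        try (CustomerProfilesClient client = CustomerProfilesClient.create()) {
            // Attach a dead-letter queue for ingestion errors.
            client.updateDomain(UpdateDomainRequest.builder()
                    .domainName("my-domain")                                                        // placeholder
                    .deadLetterQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/my-dlq")  // placeholder
                    .build());

            // Passing an empty string clears the existing value, per the documentation above.
            client.updateDomain(UpdateDomainRequest.builder()
                    .domainName("my-domain")
                    .deadLetterQueueUrl("")
                    .build());
        }
    }
}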

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "UpdateDomainResponse":{ + "type":"structure", + "required":[ + "DomainName", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name for the domain.

" + }, + "DefaultExpirationDays":{ + "shape":"expirationDaysInteger", + "documentation":"

The default number of days until the data within the domain expires.

" + }, + "DefaultEncryptionKey":{ + "shape":"encryptionKey", + "documentation":"

The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage.

" + }, + "DeadLetterQueueUrl":{ + "shape":"sqsQueueUrl", + "documentation":"

The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications.

" + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was created.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the domain was most recently edited.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "UpdateProfileRequest":{ + "type":"structure", + "required":[ + "DomainName", + "ProfileId" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + }, + "AdditionalInformation":{ + "shape":"string0To1000", + "documentation":"

Any additional information relevant to the customer's profile.

" + }, + "AccountNumber":{ + "shape":"string0To255", + "documentation":"

A unique account number that you have given to the customer.

" + }, + "PartyType":{ + "shape":"PartyType", + "documentation":"

The type of profile used to describe the customer.

" + }, + "BusinessName":{ + "shape":"string0To255", + "documentation":"

The name of the customer’s business.

" + }, + "FirstName":{ + "shape":"string0To255", + "documentation":"

The customer’s first name.

" + }, + "MiddleName":{ + "shape":"string0To255", + "documentation":"

The customer’s middle name.

" + }, + "LastName":{ + "shape":"string0To255", + "documentation":"

The customer’s last name.

" + }, + "BirthDate":{ + "shape":"string0To255", + "documentation":"

The customer’s birth date.

" + }, + "Gender":{ + "shape":"Gender", + "documentation":"

The gender with which the customer identifies.

" + }, + "PhoneNumber":{ + "shape":"string0To255", + "documentation":"

The customer's phone number, which has not been specified as a mobile, home, or business number.

" + }, + "MobilePhoneNumber":{ + "shape":"string0To255", + "documentation":"

The customer’s mobile phone number.

" + }, + "HomePhoneNumber":{ + "shape":"string0To255", + "documentation":"

The customer’s home phone number.

" + }, + "BusinessPhoneNumber":{ + "shape":"string0To255", + "documentation":"

The customer’s business phone number.

" + }, + "EmailAddress":{ + "shape":"string0To255", + "documentation":"

The customer's email address, which has not been specified as a personal or business address.

" + }, + "PersonalEmailAddress":{ + "shape":"string0To255", + "documentation":"

The customer’s personal email address.

" + }, + "BusinessEmailAddress":{ + "shape":"string0To255", + "documentation":"

The customer’s business email address.

" + }, + "Address":{ + "shape":"UpdateAddress", + "documentation":"

A generic address associated with the customer that is not mailing, shipping, or billing.

" + }, + "ShippingAddress":{ + "shape":"UpdateAddress", + "documentation":"

The customer’s shipping address.

" + }, + "MailingAddress":{ + "shape":"UpdateAddress", + "documentation":"

The customer’s mailing address.

" + }, + "BillingAddress":{ + "shape":"UpdateAddress", + "documentation":"

The customer’s billing address.

" + }, + "Attributes":{ + "shape":"UpdateAttributes", + "documentation":"

A key value pair of attributes of a customer profile.

" + } + } + }, + "UpdateProfileResponse":{ + "type":"structure", + "required":["ProfileId"], + "members":{ + "ProfileId":{ + "shape":"uuid", + "documentation":"

The unique identifier of a customer profile.

" + } + } + }, + "boolean":{"type":"boolean"}, + "encryptionKey":{ + "type":"string", + "max":255, + "min":0 + }, + "expirationDaysInteger":{ + "type":"integer", + "max":1098, + "min":1 + }, + "long":{"type":"long"}, + "maxSize100":{ + "type":"integer", + "max":100, + "min":1 + }, + "message":{"type":"string"}, + "requestValueList":{ + "type":"list", + "member":{"shape":"string1To255"} + }, + "sqsQueueUrl":{ + "type":"string", + "max":255, + "min":0 + }, + "string0To1000":{ + "type":"string", + "max":1000, + "min":0 + }, + "string0To255":{ + "type":"string", + "max":255, + "min":0 + }, + "string1To1000":{ + "type":"string", + "max":1000, + "min":1 + }, + "string1To255":{ + "type":"string", + "max":255, + "min":1 + }, + "stringifiedJson":{ + "type":"string", + "max":256000, + "min":1 + }, + "text":{ + "type":"string", + "max":1000, + "min":1 + }, + "timestamp":{"type":"timestamp"}, + "token":{ + "type":"string", + "max":1024, + "min":1 + }, + "typeName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z_][a-zA-Z_0-9-]*$" + }, + "uuid":{ + "type":"string", + "pattern":"[a-f0-9]{32}" + } + }, + "documentation":"Amazon Connect Customer Profiles

Welcome to the Amazon Connect Customer Profiles API Reference. This guide provides information about the Amazon Connect Customer Profiles API, including supported operations, data types, parameters, and schemas.

Amazon Connect Customer Profiles provides a unified customer profile for your contact center. It includes pre-built connectors, powered by AppFlow, that make it easy to combine customer information from third-party applications such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP) system with the contact history from your Amazon Connect contact center.

If you're new to Amazon Connect, you might find it helpful to also review the Amazon Connect Administrator Guide.

" +} diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 6a9bef422796..fa8507fc9410 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/service-2.json b/services/databasemigration/src/main/resources/codegen-resources/service-2.json index 27bfbdaea659..abc8a2f3b41f 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/service-2.json +++ b/services/databasemigration/src/main/resources/codegen-resources/service-2.json @@ -67,7 +67,8 @@ {"shape":"ResourceQuotaExceededFault"}, {"shape":"InvalidResourceStateFault"}, {"shape":"ResourceNotFoundFault"}, - {"shape":"AccessDeniedFault"} + {"shape":"AccessDeniedFault"}, + {"shape":"S3AccessDeniedFault"} ], "documentation":"

Creates an endpoint using the provided settings.

" }, @@ -648,6 +649,21 @@ ], "documentation":"

Modifies the specified replication task.

You can't modify the task endpoints. The task must be stopped before you can modify it.

For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.

" }, + "MoveReplicationTask":{ + "name":"MoveReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"MoveReplicationTaskMessage"}, + "output":{"shape":"MoveReplicationTaskResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Moves a replication task from its current replication instance to a different target replication instance using the specified parameters. The target replication instance must be created with the same or later AWS DMS version as the current replication instance.
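A minimal sketch of calling the new MoveReplicationTask operation described above, using the generated DMS v2 client (DatabaseMigrationClient). Both ARNs are placeholders.

import software.amazon.awssdk.services.databasemigration.DatabaseMigrationClient;
import software.amazon.awssdk.services.databasemigration.model.MoveReplicationTaskRequest;
import software.amazon.awssdk.services.databasemigration.model.MoveReplicationTaskResponse;

public class MoveReplicationTaskExample {
    public static void main(String[] args) {
        try (DatabaseMigrationClient dms = DatabaseMigrationClient.create()) {
            MoveReplicationTaskResponse response = dms.moveReplicationTask(
                    MoveReplicationTaskRequest.builder()
                            .replicationTaskArn("arn:aws:dms:us-east-1:123456789012:task:EXAMPLETASK")              // placeholder
                            .targetReplicationInstanceArn("arn:aws:dms:us-east-1:123456789012:rep:EXAMPLEINSTANCE") // placeholder
                            .build());
            System.out.println("Task status after move request: " + response.replicationTask().status());
        }
    }
}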

" + }, "RebootReplicationInstance":{ "name":"RebootReplicationInstance", "http":{ @@ -1156,7 +1172,12 @@ "IBMDb2Settings":{ "shape":"IBMDb2Settings", "documentation":"

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.

" - } + }, + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

A friendly name for the resource identifier at the end of the EndpointArn response parameter that is returned in the created Endpoint object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of EndpointArn.
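A hypothetical helper (not part of the SDK) that checks a candidate ResourceIdentifier against the constraints stated above: at most 31 characters, ASCII letters, digits, and hyphens only, must begin with a letter, no trailing hyphen, and no two consecutive hyphens.

import java.util.regex.Pattern;

public final class ResourceIdentifierCheck {
    // Starts with a letter, up to 31 characters total, letters/digits/hyphens only,
    // no consecutive hyphens, and does not end with a hyphen.
    private static final Pattern VALID =
            Pattern.compile("^(?!.*--)[A-Za-z][A-Za-z0-9-]{0,30}(?<!-)$");

    public static boolean isValid(String candidate) {
        return candidate != null && VALID.matcher(candidate).matches();
    }

    public static void main(String[] args) {
        System.out.println(isValid("Example-App-ARN1"));   // true
        System.out.println(isValid("bad--identifier"));    // false: consecutive hyphens
        System.out.println(isValid("1starts-with-digit")); // false: must begin with a letter
    }
}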

" + }, + "DocDbSettings":{"shape":"DocDbSettings"} }, "documentation":"

" }, @@ -1280,6 +1301,10 @@ "DnsNameServers":{ "shape":"String", "documentation":"

A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. For example: \"1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4\"

" + }, + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

A friendly name for the resource identifier at the end of the ReplicationInstanceArn response parameter that is returned in the created ReplicationInstance object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the ReplicationInstanceArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of ReplicationInstanceArn.

" } }, "documentation":"

" @@ -1389,6 +1414,10 @@ "TaskData":{ "shape":"String", "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" + }, + "ResourceIdentifier":{ + "shape":"String", + "documentation":"

A friendly name for the resource identifier at the end of the ReplicationTaskArn response parameter that is returned in the created ReplicationTask object. The value for this parameter can have up to 31 characters. It can contain only ASCII letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. For example, this value might result in the ReplicationTaskArn value arn:aws:dms:eu-west-1:012345678901:task:Example-App-ARN1. If you don't specify a ResourceIdentifier value, AWS DMS generates a default identifier value for the end of ReplicationTaskArn.

" } }, "documentation":"

" @@ -2325,6 +2354,48 @@ }, "documentation":"

The settings in JSON format for the DMS Transfer type source endpoint.

" }, + "DocDbSettings":{ + "type":"structure", + "members":{ + "Username":{ + "shape":"String", + "documentation":"

The user name you use to access the DocumentDB source endpoint.

" + }, + "Password":{ + "shape":"SecretString", + "documentation":"

The password for the user account you use to access the DocumentDB source endpoint.

" + }, + "ServerName":{ + "shape":"String", + "documentation":"

The name of the server on the DocumentDB source endpoint.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port value for the DocumentDB source endpoint.

" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"

The database name on the DocumentDB source endpoint.

" + }, + "NestingLevel":{ + "shape":"NestingLevelValue", + "documentation":"

Specifies either document or table mode.

Default value is \"none\". Specify \"none\" to use document mode. Specify \"one\" to use table mode.

" + }, + "ExtractDocId":{ + "shape":"BooleanOptional", + "documentation":"

Specifies the document ID. Use this setting when NestingLevel is set to \"none\".

Default value is \"false\".

" + }, + "DocsToInvestigate":{ + "shape":"IntegerOptional", + "documentation":"

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to \"one\".

Must be a positive value greater than 0. Default value is 1000.
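A minimal sketch of the DocDbSettings described above, configured for table mode ("one") so that DocsToInvestigate controls how many documents are sampled. It assumes the regenerated model exposes the nesting level through the existing NestingLevelValue enum; the host, credentials, and database name are placeholders.

import software.amazon.awssdk.services.databasemigration.model.DocDbSettings;
import software.amazon.awssdk.services.databasemigration.model.NestingLevelValue;

public class DocDbSettingsExample {
    public static void main(String[] args) {
        DocDbSettings settings = DocDbSettings.builder()
                .serverName("docdb.cluster-example.us-east-1.docdb.amazonaws.com") // placeholder host
                .port(27017)
                .databaseName("sales")                // placeholder database
                .username("dms_user")                 // placeholder user
                .password("REPLACE_ME")               // placeholder credential
                .nestingLevel(NestingLevelValue.ONE)  // table mode
                .docsToInvestigate(2000)              // sample 2000 documents instead of the default 1000
                .build();
        System.out.println(settings);
    }
}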

" + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" + } + }, + "documentation":"

Provides information that defines a DocumentDB endpoint.

" + }, "DynamoDbSettings":{ "type":"structure", "required":["ServiceAccessRoleArn"], @@ -2507,7 +2578,8 @@ "IBMDb2Settings":{ "shape":"IBMDb2Settings", "documentation":"

The settings for the IBM Db2 LUW source endpoint. For more information, see the IBMDb2Settings structure.

" - } + }, + "DocDbSettings":{"shape":"DocDbSettings"} }, "documentation":"

Describes an endpoint of a database instance in response to operations such as the following:

  • CreateEndpoint

  • DescribeEndpoint

  • DescribeEndpointTypes

  • ModifyEndpoint

" }, @@ -3114,6 +3186,10 @@ "IBMDb2Settings":{ "shape":"IBMDb2Settings", "documentation":"

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.

" + }, + "DocDbSettings":{ + "shape":"DocDbSettings", + "documentation":"

Settings in JSON format for the source DocumentDB endpoint. For more information about the available settings, see the configuration properties section in Using DocumentDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" } }, "documentation":"

" @@ -3365,6 +3441,34 @@ }, "documentation":"

Provides information that defines a MongoDB endpoint.

" }, + "MoveReplicationTaskMessage":{ + "type":"structure", + "required":[ + "ReplicationTaskArn", + "TargetReplicationInstanceArn" + ], + "members":{ + "ReplicationTaskArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the task that you want to move.

" + }, + "TargetReplicationInstanceArn":{ + "shape":"String", + "documentation":"

The ARN of the replication instance where you want to move the task to.

" + } + }, + "documentation":"

" + }, + "MoveReplicationTaskResponse":{ + "type":"structure", + "members":{ + "ReplicationTask":{ + "shape":"ReplicationTask", + "documentation":"

The replication task that was moved.

" + } + }, + "documentation":"

" + }, "MySQLSettings":{ "type":"structure", "members":{ @@ -3773,6 +3877,14 @@ "shape":"String", "documentation":"

The name of the intermediate S3 bucket used to store .csv files before uploading data to Redshift.

" }, + "CaseSensitiveNames":{ + "shape":"BooleanOptional", + "documentation":"

If Amazon Redshift is configured to support case sensitive schema names, set CaseSensitiveNames to true. The default is false.

" + }, + "CompUpdate":{ + "shape":"BooleanOptional", + "documentation":"

If you set CompUpdate to true, Amazon Redshift applies automatic compression if the table is empty, even if the table columns already have encodings other than RAW. If you set CompUpdate to false, automatic compression is disabled and existing column encodings aren't changed. The default is true.

" + }, "ConnectionTimeout":{ "shape":"IntegerOptional", "documentation":"

A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.

" @@ -3793,6 +3905,10 @@ "shape":"EncryptionModeValue", "documentation":"

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, create an AWS Identity and Access Management (IAM) role with a policy that allows \"arn:aws:s3:::*\" to use the following actions: \"s3:PutObject\", \"s3:ListBucket\"

" }, + "ExplicitIds":{ + "shape":"BooleanOptional", + "documentation":"

This setting is only valid for a full-load migration task. Set ExplicitIds to true to have tables with IDENTITY columns override their auto-generated values with explicit values loaded from the source data files used to populate the tables. The default is false.
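A minimal sketch of the Redshift target settings added in this model (CaseSensitiveNames, CompUpdate, ExplicitIds), assuming the regenerated RedshiftSettings class exposes them with the usual builder naming.

import software.amazon.awssdk.services.databasemigration.model.RedshiftSettings;

public class RedshiftSettingsExample {
    public static void main(String[] args) {
        RedshiftSettings settings = RedshiftSettings.builder()
                .caseSensitiveNames(true)   // schema names on the cluster are case sensitive
                .compUpdate(false)          // keep existing column encodings; no automatic compression
                .explicitIds(true)          // full load only: load IDENTITY columns from the source files
                .build();
        System.out.println(settings);
    }
}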

" + }, "FileTransferUploadStreams":{ "shape":"IntegerOptional", "documentation":"

The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults to 10.

The number of parallel streams used to upload a single .csv file to an S3 bucket using S3 Multipart Upload. For more information, see Multipart upload overview.

FileTransferUploadStreams accepts a value from 1 through 64. It defaults to 10.

" @@ -4200,15 +4316,15 @@ }, "SourceEndpointArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

" + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the endpoint.

" }, "TargetEndpointArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

" + "documentation":"

The ARN that uniquely identifies the endpoint.

" }, "ReplicationInstanceArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the replication instance.

" + "documentation":"

The ARN of the replication instance.

" }, "MigrationType":{ "shape":"MigrationTypeValue", @@ -4224,7 +4340,7 @@ }, "Status":{ "shape":"String", - "documentation":"

The status of the replication task.

" + "documentation":"

The status of the replication task. This response parameter can return one of the following values:

" }, "LastFailureMessage":{ "shape":"String", @@ -4232,7 +4348,7 @@ }, "StopReason":{ "shape":"String", - "documentation":"

The reason the replication task was stopped. This response parameter can return one of the following values:

  • \"STOP_REASON_FULL_LOAD_COMPLETED\" – Full-load migration completed.

  • \"STOP_REASON_CACHED_CHANGES_APPLIED\" – Change data capture (CDC) load completed.

  • \"STOP_REASON_CACHED_CHANGES_NOT_APPLIED\" – In a full-load and CDC migration, the full-load stopped as specified before starting the CDC migration.

  • \"STOP_REASON_SERVER_TIME\" – The migration stopped at the specified server time.

" + "documentation":"

The reason the replication task was stopped. This response parameter can return one of the following values:

  • \"STOP_REASON_FULL_LOAD_COMPLETED\" – Full-load migration completed.

  • \"STOP_REASON_CACHED_CHANGES_APPLIED\" – Change data capture (CDC) load completed.

  • \"STOP_REASON_CACHED_CHANGES_NOT_APPLIED\" – In a full-load and CDC migration, the full load stopped as specified before starting the CDC migration.

  • \"STOP_REASON_SERVER_TIME\" – The migration stopped at the specified server time.

" }, "ReplicationTaskCreationDate":{ "shape":"TStamp", @@ -4265,6 +4381,10 @@ "TaskData":{ "shape":"String", "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" + }, + "TargetReplicationInstanceArn":{ + "shape":"String", + "documentation":"

The ARN of the replication instance to which this task is moved in response to running the MoveReplicationTask operation. Otherwise, this response parameter isn't a member of the ReplicationTask object.

" } }, "documentation":"

Provides information that describes a replication task created by the CreateReplicationTask operation.

" @@ -4617,7 +4737,7 @@ }, "DatePartitionEnabled":{ "shape":"BooleanOptional", - "documentation":"

When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false. For more information about date-based folder partitoning, see Using date-based folder partitioning

" + "documentation":"

When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value is false. For more information about date-based folder partitioning, see Using date-based folder partitioning.

" }, "DatePartitionSequence":{ "shape":"DatePartitionSequenceValue", @@ -4625,7 +4745,7 @@ }, "DatePartitionDelimiter":{ "shape":"DatePartitionDelimiterValue", - "documentation":"

Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH (/). Use this parameter when DatePartitionedEnabled is set to true.

" + "documentation":"

Specifies a date-separating delimiter to use during folder partitioning. The default value is SLASH. Use this parameter when DatePartitionEnabled is set to true.

" } }, "documentation":"

Settings for exporting data to Amazon S3.

" @@ -4768,7 +4888,7 @@ }, "StartReplicationTaskType":{ "shape":"StartReplicationTaskTypeValue", - "documentation":"

The type of replication task.

" + "documentation":"

A type of replication task.

" }, "CdcStartTime":{ "shape":"TStamp", diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml new file mode 100644 index 000000000000..8cbb34dbeec8 --- /dev/null +++ b/services/databrew/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + databrew + AWS Java SDK :: Services :: Data Brew + The AWS Java SDK for Data Brew module holds the client classes that are used for + communicating with Data Brew. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.databrew + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/databrew/src/main/resources/codegen-resources/paginators-1.json b/services/databrew/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..d9620b8c4cd3 --- /dev/null +++ b/services/databrew/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,46 @@ +{ + "pagination": { + "ListDatasets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Datasets" + }, + "ListJobRuns": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "JobRuns" + }, + "ListJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Jobs" + }, + "ListProjects": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Projects" + }, + "ListRecipeVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Recipes" + }, + "ListRecipes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Recipes" + }, + "ListSchedules": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Schedules" + } + } +} \ No newline at end of file diff --git a/services/databrew/src/main/resources/codegen-resources/service-2.json b/services/databrew/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..7b61b3922202 --- /dev/null +++ b/services/databrew/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,3099 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-07-25", + "endpointPrefix":"databrew", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Glue DataBrew", + "serviceId":"DataBrew", + "signatureVersion":"v4", + "signingName":"databrew", + "uid":"databrew-2017-07-25" + }, + "operations":{ + "BatchDeleteRecipeVersion":{ + "name":"BatchDeleteRecipeVersion", + "http":{ + "method":"POST", + "requestUri":"/recipes/{name}/batchDeleteRecipeVersion" + }, + "input":{"shape":"BatchDeleteRecipeVersionRequest"}, + "output":{"shape":"BatchDeleteRecipeVersionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes one or more versions of a recipe at a time.

" + }, + "CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/datasets" + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new AWS Glue DataBrew dataset for this AWS account.

" + }, + "CreateProfileJob":{ + "name":"CreateProfileJob", + "http":{ + "method":"POST", + "requestUri":"/profileJobs" + }, + "input":{"shape":"CreateProfileJobRequest"}, + "output":{"shape":"CreateProfileJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new job to profile an AWS Glue DataBrew dataset that exists in the current AWS account.

" + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/projects" + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new AWS Glue DataBrew project in the current AWS account.

" + }, + "CreateRecipe":{ + "name":"CreateRecipe", + "http":{ + "method":"POST", + "requestUri":"/recipes" + }, + "input":{"shape":"CreateRecipeRequest"}, + "output":{"shape":"CreateRecipeResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new AWS Glue DataBrew recipe for the current AWS account.

" + }, + "CreateRecipeJob":{ + "name":"CreateRecipeJob", + "http":{ + "method":"POST", + "requestUri":"/recipeJobs" + }, + "input":{"shape":"CreateRecipeJobRequest"}, + "output":{"shape":"CreateRecipeJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new job for an existing AWS Glue DataBrew recipe in the current AWS account. You can create a standalone job using either a project, or a combination of a recipe and a dataset.
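A minimal sketch of creating a standalone recipe job from a dataset plus a published recipe (rather than from a project), assuming the generated DataBrewClient and that the Output, RecipeReference, and S3Location shapes follow the usual builder naming. The job, dataset, and recipe names, the bucket, and the role ARN are placeholders.

import software.amazon.awssdk.services.databrew.DataBrewClient;
import software.amazon.awssdk.services.databrew.model.CreateRecipeJobRequest;
import software.amazon.awssdk.services.databrew.model.Output;
import software.amazon.awssdk.services.databrew.model.RecipeReference;
import software.amazon.awssdk.services.databrew.model.S3Location;

public class CreateRecipeJobExample {
    public static void main(String[] args) {
        try (DataBrewClient databrew = DataBrewClient.create()) {
            databrew.createRecipeJob(CreateRecipeJobRequest.builder()
                    .name("orders-cleanup-job")                                // placeholder job name
                    .datasetName("orders-dataset")                             // placeholder dataset
                    .recipeReference(RecipeReference.builder()
                            .name("orders-cleanup-recipe")                     // placeholder recipe
                            .recipeVersion("1.0")
                            .build())
                    .outputs(Output.builder()
                            .location(S3Location.builder()
                                    .bucket("my-databrew-output")              // placeholder bucket
                                    .build())
                            .build())
                    .roleArn("arn:aws:iam::123456789012:role/DataBrewJobRole") // placeholder role
                    .build());
        }
    }
}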

" + }, + "CreateSchedule":{ + "name":"CreateSchedule", + "http":{ + "method":"POST", + "requestUri":"/schedules" + }, + "input":{"shape":"CreateScheduleRequest"}, + "output":{"shape":"CreateScheduleResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new schedule for one or more AWS Glue DataBrew jobs. Jobs can be run at a specific date and time, or at regular intervals.

" + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"DELETE", + "requestUri":"/datasets/{name}" + }, + "input":{"shape":"DeleteDatasetRequest"}, + "output":{"shape":"DeleteDatasetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a dataset from AWS Glue DataBrew.

" + }, + "DeleteJob":{ + "name":"DeleteJob", + "http":{ + "method":"DELETE", + "requestUri":"/jobs/{name}" + }, + "input":{"shape":"DeleteJobRequest"}, + "output":{"shape":"DeleteJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the specified AWS Glue DataBrew job from the current AWS account. The job can be for a recipe or for a profile.

" + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"DELETE", + "requestUri":"/projects/{name}" + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes an existing AWS Glue DataBrew project from the current AWS account.

" + }, + "DeleteRecipeVersion":{ + "name":"DeleteRecipeVersion", + "http":{ + "method":"DELETE", + "requestUri":"/recipes/{name}/recipeVersion/{recipeVersion}" + }, + "input":{"shape":"DeleteRecipeVersionRequest"}, + "output":{"shape":"DeleteRecipeVersionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a single version of an AWS Glue DataBrew recipe.

" + }, + "DeleteSchedule":{ + "name":"DeleteSchedule", + "http":{ + "method":"DELETE", + "requestUri":"/schedules/{name}" + }, + "input":{"shape":"DeleteScheduleRequest"}, + "output":{"shape":"DeleteScheduleResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the specified AWS Glue DataBrew schedule from the current AWS account.

" + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"GET", + "requestUri":"/datasets/{name}" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the definition of a specific AWS Glue DataBrew dataset that is in the current AWS account.

" + }, + "DescribeJob":{ + "name":"DescribeJob", + "http":{ + "method":"GET", + "requestUri":"/jobs/{name}" + }, + "input":{"shape":"DescribeJobRequest"}, + "output":{"shape":"DescribeJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the definition of a specific AWS Glue DataBrew job that is in the current AWS account.

" + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"GET", + "requestUri":"/projects/{name}" + }, + "input":{"shape":"DescribeProjectRequest"}, + "output":{"shape":"DescribeProjectResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the definition of a specific AWS Glue DataBrew project that is in the current AWS account.

" + }, + "DescribeRecipe":{ + "name":"DescribeRecipe", + "http":{ + "method":"GET", + "requestUri":"/recipes/{name}" + }, + "input":{"shape":"DescribeRecipeRequest"}, + "output":{"shape":"DescribeRecipeResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the definition of a specific AWS Glue DataBrew recipe that is in the current AWS account.

" + }, + "DescribeSchedule":{ + "name":"DescribeSchedule", + "http":{ + "method":"GET", + "requestUri":"/schedules/{name}" + }, + "input":{"shape":"DescribeScheduleRequest"}, + "output":{"shape":"DescribeScheduleResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the definition of a specific AWS Glue DataBrew schedule that is in the current AWS account.

" + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"GET", + "requestUri":"/datasets" + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists all of the AWS Glue DataBrew datasets for the current AWS account.

" + }, + "ListJobRuns":{ + "name":"ListJobRuns", + "http":{ + "method":"GET", + "requestUri":"/jobs/{name}/jobRuns" + }, + "input":{"shape":"ListJobRunsRequest"}, + "output":{"shape":"ListJobRunsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all of the previous runs of a particular AWS Glue DataBrew job in the current AWS account.

" + }, + "ListJobs":{ + "name":"ListJobs", + "http":{ + "method":"GET", + "requestUri":"/jobs" + }, + "input":{"shape":"ListJobsRequest"}, + "output":{"shape":"ListJobsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists the AWS Glue DataBrew jobs in the current AWS account.

" + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"GET", + "requestUri":"/projects" + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists all of the DataBrew projects in the current AWS account.

" + }, + "ListRecipeVersions":{ + "name":"ListRecipeVersions", + "http":{ + "method":"GET", + "requestUri":"/recipeVersions" + }, + "input":{"shape":"ListRecipeVersionsRequest"}, + "output":{"shape":"ListRecipeVersionsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists all of the versions of a particular AWS Glue DataBrew recipe in the current AWS account.

" + }, + "ListRecipes":{ + "name":"ListRecipes", + "http":{ + "method":"GET", + "requestUri":"/recipes" + }, + "input":{"shape":"ListRecipesRequest"}, + "output":{"shape":"ListRecipesResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists all of the AWS Glue DataBrew recipes in the current AWS account.

" + }, + "ListSchedules":{ + "name":"ListSchedules", + "http":{ + "method":"GET", + "requestUri":"/schedules" + }, + "input":{"shape":"ListSchedulesRequest"}, + "output":{"shape":"ListSchedulesResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Lists the AWS Glue DataBrew schedules in the current AWS account.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all the tags for an AWS Glue DataBrew resource.

" + }, + "PublishRecipe":{ + "name":"PublishRecipe", + "http":{ + "method":"POST", + "requestUri":"/recipes/{name}/publishRecipe" + }, + "input":{"shape":"PublishRecipeRequest"}, + "output":{"shape":"PublishRecipeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Publishes a new major version of an AWS Glue DataBrew recipe that exists in the current AWS account.

" + }, + "SendProjectSessionAction":{ + "name":"SendProjectSessionAction", + "http":{ + "method":"PUT", + "requestUri":"/projects/{name}/sendProjectSessionAction" + }, + "input":{"shape":"SendProjectSessionActionRequest"}, + "output":{"shape":"SendProjectSessionActionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Performs a recipe step within an interactive AWS Glue DataBrew session that's currently open.

" + }, + "StartJobRun":{ + "name":"StartJobRun", + "http":{ + "method":"POST", + "requestUri":"/jobs/{name}/startJobRun" + }, + "input":{"shape":"StartJobRunRequest"}, + "output":{"shape":"StartJobRunResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Runs an AWS Glue DataBrew job that exists in the current AWS account.

" + }, + "StartProjectSession":{ + "name":"StartProjectSession", + "http":{ + "method":"PUT", + "requestUri":"/projects/{name}/startProjectSession" + }, + "input":{"shape":"StartProjectSessionRequest"}, + "output":{"shape":"StartProjectSessionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates an interactive session, enabling you to manipulate an AWS Glue DataBrew project.

" + }, + "StopJobRun":{ + "name":"StopJobRun", + "http":{ + "method":"POST", + "requestUri":"/jobs/{name}/jobRun/{runId}/stopJobRun" + }, + "input":{"shape":"StopJobRunRequest"}, + "output":{"shape":"StopJobRunResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Stops the specified job from running in the current AWS account.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Adds metadata tags to an AWS Glue DataBrew resource, such as a dataset, job, project, or recipe.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes metadata tags from an AWS Glue DataBrew resource.

" + }, + "UpdateDataset":{ + "name":"UpdateDataset", + "http":{ + "method":"PUT", + "requestUri":"/datasets/{name}" + }, + "input":{"shape":"UpdateDatasetRequest"}, + "output":{"shape":"UpdateDatasetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Modifies the definition of an existing AWS Glue DataBrew dataset in the current AWS account.

" + }, + "UpdateProfileJob":{ + "name":"UpdateProfileJob", + "http":{ + "method":"PUT", + "requestUri":"/profileJobs/{name}" + }, + "input":{"shape":"UpdateProfileJobRequest"}, + "output":{"shape":"UpdateProfileJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Modifies the definition of an existing AWS Glue DataBrew job in the current AWS account.

" + }, + "UpdateProject":{ + "name":"UpdateProject", + "http":{ + "method":"PUT", + "requestUri":"/projects/{name}" + }, + "input":{"shape":"UpdateProjectRequest"}, + "output":{"shape":"UpdateProjectResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Modifies the definition of an existing AWS Glue DataBrew project in the current AWS account.

" + }, + "UpdateRecipe":{ + "name":"UpdateRecipe", + "http":{ + "method":"PUT", + "requestUri":"/recipes/{name}" + }, + "input":{"shape":"UpdateRecipeRequest"}, + "output":{"shape":"UpdateRecipeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Modifies the definition of the latest working version of an AWS Glue DataBrew recipe in the current AWS account.

" + }, + "UpdateRecipeJob":{ + "name":"UpdateRecipeJob", + "http":{ + "method":"PUT", + "requestUri":"/recipeJobs/{name}" + }, + "input":{"shape":"UpdateRecipeJobRequest"}, + "output":{"shape":"UpdateRecipeJobResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Modifies the definition of an existing AWS Glue DataBrew recipe job in the current AWS account.

" + }, + "UpdateSchedule":{ + "name":"UpdateSchedule", + "http":{ + "method":"PUT", + "requestUri":"/schedules/{name}" + }, + "input":{"shape":"UpdateScheduleRequest"}, + "output":{"shape":"UpdateScheduleResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

Modifies the definition of an existing AWS Glue DataBrew schedule in the current AWS account.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

Access to the specified resource was denied.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":255 + }, + "ActionId":{"type":"integer"}, + "Arn":{ + "type":"string", + "max":2048, + "min":20 + }, + "AssumeControl":{"type":"boolean"}, + "Attempt":{"type":"integer"}, + "BatchDeleteRecipeVersionRequest":{ + "type":"structure", + "required":[ + "Name", + "RecipeVersions" + ], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe to be modified.

", + "location":"uri", + "locationName":"name" + }, + "RecipeVersions":{ + "shape":"RecipeVersionList", + "documentation":"

An array of version identifiers to be deleted.

" + } + } + }, + "BatchDeleteRecipeVersionResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe that was modified.

" + }, + "Errors":{ + "shape":"RecipeErrorList", + "documentation":"

Errors, if any, that were encountered when deleting the recipe versions.

" + } + } + }, + "Bucket":{ + "type":"string", + "max":63, + "min":3 + }, + "CatalogId":{ + "type":"string", + "max":255, + "min":1 + }, + "ClientSessionId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9-]*$" + }, + "ColumnName":{ + "type":"string", + "max":255, + "min":1 + }, + "ColumnNameList":{ + "type":"list", + "member":{"shape":"ColumnName"}, + "max":200 + }, + "ColumnRange":{ + "type":"integer", + "max":20, + "min":0 + }, + "CompressionFormat":{ + "type":"string", + "enum":[ + "GZIP", + "LZ4", + "SNAPPY", + "BZIP2", + "DEFLATE", + "LZO", + "BROTLI", + "ZSTD", + "ZLIB" + ] + }, + "Condition":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Z\\_]+$" + }, + "ConditionExpression":{ + "type":"structure", + "required":[ + "Condition", + "TargetColumn" + ], + "members":{ + "Condition":{ + "shape":"Condition", + "documentation":"

A specific condition to apply to a recipe action. For more information, see Recipe structure in the AWS Glue DataBrew Developer Guide.

" + }, + "Value":{ + "shape":"ConditionValue", + "documentation":"

A value that the condition must evaluate to for the condition to succeed.

" + }, + "TargetColumn":{ + "shape":"TargetColumn", + "documentation":"

A column to apply this condition to, within an AWS Glue DataBrew dataset.

" + } + }, + "documentation":"

Represents an individual condition that evaluates to true or false.

Conditions are used with recipe actions: The action is only performed for column values where the condition evaluates to true.

If a recipe requires more than one condition, then the recipe must specify multiple ConditionExpression elements. Each condition is applied to the rows in a dataset first, before the recipe action is performed.
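A minimal sketch of a single ConditionExpression as described above; the condition name, value, and target column are placeholders (see the Recipe structure documentation for the supported condition names).

import software.amazon.awssdk.services.databrew.model.ConditionExpression;

public class ConditionExpressionExample {
    public static void main(String[] args) {
        ConditionExpression onlyLargeOrders = ConditionExpression.builder()
                .condition("GREATER_THAN")   // placeholder condition name
                .value("100")                // the value the condition is evaluated against
                .targetColumn("order_total") // placeholder column in the dataset
                .build();
        System.out.println(onlyLargeOrders);
    }
}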

" + }, + "ConditionExpressionList":{ + "type":"list", + "member":{"shape":"ConditionExpression"} + }, + "ConditionValue":{ + "type":"string", + "max":1024 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

Updating or deleting a resource can cause an inconsistent state.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateDatasetRequest":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset to be created.

" + }, + "FormatOptions":{"shape":"FormatOptions"}, + "Input":{"shape":"Input"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags to apply to this dataset.

" + } + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset that you created.

" + } + } + }, + "CreateProfileJobRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "Name", + "OutputLocation", + "RoleArn" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset that this job is to act upon.

" + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

" + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

The encryption mode for the job, which can be one of the following:

  • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

  • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

" + }, + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job to be created.

" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

" + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

The maximum number of nodes that DataBrew can use when the job processes data.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry the job after a job run fails.

" + }, + "OutputLocation":{"shape":"S3Location"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags to apply to this job.

" + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + } + } + }, + "CreateProfileJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job that was created.

" + } + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "Name", + "RecipeName", + "RoleArn" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset to associate this project with.

" + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

A unique name for the new project.

" + }, + "RecipeName":{ + "shape":"RecipeName", + "documentation":"

The name of an existing recipe to associate with the project.

" + }, + "Sample":{"shape":"Sample"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags to apply to this project.

" + } + } + }, + "CreateProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project that you created.

" + } + } + }, + "CreateRecipeJobRequest":{ + "type":"structure", + "required":[ + "Name", + "Outputs", + "RoleArn" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset that this job processes.

" + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

" + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

The encryption mode for the job, which can be one of the following:

  • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

  • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

" + }, + "Name":{ + "shape":"JobName", + "documentation":"

A unique name for the job.

" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

" + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

The maximum number of nodes that DataBrew can consume when the job processes data.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry the job after a job run fails.

" + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

One or more artifacts that represent the output from running the job.

" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

Either the name of an existing project, or a combination of a recipe and a dataset to associate with the recipe.

" + }, + "RecipeReference":{"shape":"RecipeReference"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags to apply to this job.

" + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + } + } + }, + "CreateRecipeJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job that you created.

" + } + } + }, + "CreateRecipeRequest":{ + "type":"structure", + "required":[ + "Name", + "Steps" + ], + "members":{ + "Description":{ + "shape":"RecipeDescription", + "documentation":"

A description for the recipe.

" + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

A unique name for the recipe.

" + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

An array containing the steps to be performed by the recipe. Each recipe step consists of one recipe action and (optionally) an array of condition expressions.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags to apply to this recipe.

" + } + } + }, + "CreateRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe that you created.

" + } + } + }, + "CreateScheduleRequest":{ + "type":"structure", + "required":[ + "CronExpression", + "Name" + ], + "members":{ + "JobNames":{ + "shape":"JobNameList", + "documentation":"

The name or names of one or more jobs to be run.

" + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

The date or dates and time or times, in cron format, when the jobs are to be run.
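A minimal sketch of scheduling an existing job with a cron expression, assuming the generated DataBrewClient/CreateScheduleRequest classes. The schedule name, job name, and cron expression are placeholders; check the DataBrew cron syntax for the exact format you need.

import software.amazon.awssdk.services.databrew.DataBrewClient;
import software.amazon.awssdk.services.databrew.model.CreateScheduleRequest;

public class CreateScheduleExample {
    public static void main(String[] args) {
        try (DataBrewClient databrew = DataBrewClient.create()) {
            databrew.createSchedule(CreateScheduleRequest.builder()
                    .name("nightly-orders-schedule")      // placeholder schedule name
                    .jobNames("orders-cleanup-job")       // placeholder job to run
                    .cronExpression("Cron(0 23 * * ? *)") // placeholder: run daily at 23:00 UTC
                    .build());
        }
    }
}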

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags to apply to this schedule.

" + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

A unique name for the schedule.

" + } + } + }, + "CreateScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule that was created.

" + } + } + }, + "CreatedBy":{"type":"string"}, + "CronExpression":{ + "type":"string", + "max":512, + "min":1 + }, + "DataCatalogInputDefinition":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogId", + "documentation":"

The unique identifier of the AWS account that holds the Data Catalog that stores the data.

" + }, + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"

The name of a database in the Data Catalog.

" + }, + "TableName":{ + "shape":"TableName", + "documentation":"

The name of a database table in the Data Catalog. This table corresponds to a DataBrew dataset.

" + }, + "TempDirectory":{ + "shape":"S3Location", + "documentation":"

An Amazon S3 location that the AWS Glue Data Catalog can use as a temporary directory.

" + } + }, + "documentation":"

Represents how metadata stored in the AWS Glue Data Catalog is defined in an AWS Glue DataBrew dataset.

" + }, + "DatabaseName":{ + "type":"string", + "max":255, + "min":1 + }, + "Dataset":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The ID of the AWS account that owns the dataset.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (the user name) of the user who created the dataset.

" + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the dataset was created.

" + }, + "Name":{ + "shape":"DatasetName", + "documentation":"

The unique name of the dataset.

" + }, + "FormatOptions":{ + "shape":"FormatOptions", + "documentation":"

Options that define how DataBrew interprets the data in the dataset.

" + }, + "Input":{ + "shape":"Input", + "documentation":"

Information on how DataBrew can find the dataset, in either the AWS Glue Data Catalog or Amazon S3.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The last modification date and time of the dataset.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (the user name) of the user who last modified the dataset.

" + }, + "Source":{ + "shape":"Source", + "documentation":"

The location of the data for the dataset, either Amazon S3 or the AWS Glue Data Catalog.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags that have been applied to the dataset.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The unique Amazon Resource Name (ARN) for the dataset.

" + } + }, + "documentation":"

Represents a dataset that can be processed by AWS Glue DataBrew.

" + }, + "DatasetList":{ + "type":"list", + "member":{"shape":"Dataset"} + }, + "DatasetName":{ + "type":"string", + "max":255, + "min":1 + }, + "Date":{"type":"timestamp"}, + "DeleteDatasetRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset to be deleted.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteDatasetResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset that you deleted.

" + } + } + }, + "DeleteJobRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job to be deleted.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job that you deleted.

" + } + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project to be deleted.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project that you deleted.

" + } + } + }, + "DeleteRecipeVersionRequest":{ + "type":"structure", + "required":[ + "Name", + "RecipeVersion" + ], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe to be deleted.

", + "location":"uri", + "locationName":"name" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The version of the recipe to be deleted.

", + "location":"uri", + "locationName":"recipeVersion" + } + } + }, + "DeleteRecipeVersionResponse":{ + "type":"structure", + "required":[ + "Name", + "RecipeVersion" + ], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe that was deleted.

" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The version of the recipe that was deleted.

" + } + } + }, + "DeleteScheduleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule to be deleted.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule that was deleted.

" + } + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset to be described.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (user name) of the user who created the dataset.

" + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the dataset was created.

" + }, + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset.

" + }, + "FormatOptions":{"shape":"FormatOptions"}, + "Input":{"shape":"Input"}, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The date and time that the dataset was last modified.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (user name) of the user who last modified the dataset.

" + }, + "Source":{ + "shape":"Source", + "documentation":"

The location of the data for this dataset, either Amazon S3 or the AWS Glue Data Catalog.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags associated with this dataset.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset.

" + } + } + }, + "DescribeJobRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job to be described.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the job was created.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (user name) of the user associated with the creation of the job.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The dataset that the job acts upon.

" + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

" + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

The encryption mode for the job, which can be one of the following:

  • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

  • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

" + }, + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job.

" + }, + "Type":{ + "shape":"JobType", + "documentation":"

The job type, which must be one of the following:

  • PROFILE - The job analyzes the dataset to determine its size, data types, data distribution, and more.

  • RECIPE - The job applies one or more transformations to a dataset.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (user name) of the user who last modified the job.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The date and time that the job was last modified.

" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

A value that indicates whether Amazon CloudWatch logging is enabled for this job.

" + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

The maximum number of nodes that AWS Glue DataBrew can consume when the job processes data.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry the job after a job run fails.

" + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

One or more artifacts that represent the output from running the job.

" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The DataBrew project associated with this job.

" + }, + "RecipeReference":{"shape":"RecipeReference"}, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the job.

" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that was assumed for this request.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags associated with this job.

" + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + } + } + }, + "DescribeProjectRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project to be described.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the project was created.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (user name) of the user who created the project.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The dataset associated with the project.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The date and time that the project was last modified.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (user name) of the user who last modified the project.

" + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project.

" + }, + "RecipeName":{ + "shape":"RecipeName", + "documentation":"

The recipe associated with this job.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the project.

" + }, + "Sample":{"shape":"Sample"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that was assumed for this request.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags associated with this project.

" + }, + "SessionStatus":{ + "shape":"SessionStatus", + "documentation":"

Describes the current state of the session:

  • PROVISIONING - allocating resources for the session.

  • INITIALIZING - getting the session ready for first use.

  • ASSIGNED - the session is ready for use.

" + }, + "OpenedBy":{ + "shape":"OpenedBy", + "documentation":"

The identifier (user name) of the user that opened the project for use.

" + }, + "OpenDate":{ + "shape":"Date", + "documentation":"

The date and time when the project was opened.

" + } + } + }, + "DescribeRecipeRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe to be described.

", + "location":"uri", + "locationName":"name" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The recipe version identifier. If this parameter isn't specified, then the latest published version is returned.

", + "location":"querystring", + "locationName":"recipeVersion" + } + } + }, + "DescribeRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (user name) of the user who created the recipe.

" + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the recipe was created.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (user name) of the user who last modified the recipe.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The date and time that the recipe was last modified.

" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project associated with this recipe.

" + }, + "PublishedBy":{ + "shape":"PublishedBy", + "documentation":"

The identifier (user name) of the user who last published the recipe.

" + }, + "PublishedDate":{ + "shape":"Date", + "documentation":"

The date and time when the recipe was last published.

" + }, + "Description":{ + "shape":"RecipeDescription", + "documentation":"

The description of the recipe.

" + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe.

" + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

One or more steps to be performed by the recipe. Each step consists of an action, and the conditions under which the action should succeed.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags associated with this project.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the recipe.

" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The recipe version identifier.

" + } + } + }, + "DescribeScheduleRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule to be described.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DescribeScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the schedule was created.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (user name) of the user who created the schedule.

" + }, + "JobNames":{ + "shape":"JobNameList", + "documentation":"

The name or names of one or more jobs to be run by using the schedule.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (user name) of the user who last modified the schedule.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The date and time that the schedule was last modified.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the schedule.

" + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

The date or dates and time or times, in cron format, when the jobs are to be run for the schedule.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags associated with this schedule.

" + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule.

" + } + } + }, + "EncryptionKeyArn":{ + "type":"string", + "max":2048, + "min":20 + }, + "EncryptionMode":{ + "type":"string", + "enum":[ + "SSE-KMS", + "SSE-S3" + ] + }, + "ErrorCode":{ + "type":"string", + "pattern":"^[1-5][0-9][0-9]$" + }, + "ExcelOptions":{ + "type":"structure", + "members":{ + "SheetNames":{ + "shape":"SheetNameList", + "documentation":"

Specifies one or more named sheets in the Excel file, which will be included in the dataset.

" + }, + "SheetIndexes":{ + "shape":"SheetIndexList", + "documentation":"

Specifies one or more sheet numbers in the Excel file, which will be included in the dataset.

" + } + }, + "documentation":"

Options that define how DataBrew will interpret a Microsoft Excel file, when creating a dataset from that file.

" + }, + "ExecutionTime":{"type":"integer"}, + "FormatOptions":{ + "type":"structure", + "members":{ + "Json":{ + "shape":"JsonOptions", + "documentation":"

Options that define how JSON input is to be interpreted by DataBrew.

" + }, + "Excel":{ + "shape":"ExcelOptions", + "documentation":"

Options that define how Excel input is to be interpreted by DataBrew.

" + } + }, + "documentation":"

Options that define how DataBrew interprets the input, whether it is in JSON or Microsoft Excel format.

" + }, + "HiddenColumnList":{ + "type":"list", + "member":{"shape":"ColumnName"} + }, + "Input":{ + "type":"structure", + "members":{ + "S3InputDefinition":{ + "shape":"S3Location", + "documentation":"

The Amazon S3 location where the data is stored.

" + }, + "DataCatalogInputDefinition":{ + "shape":"DataCatalogInputDefinition", + "documentation":"

The AWS Glue Data Catalog parameters for the data.

" + } + }, + "documentation":"

Information on how AWS Glue DataBrew can find data, in either the AWS Glue Data Catalog or Amazon S3.

" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

An internal service failure occurred.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "Job":{ + "type":"structure", + "required":["Name"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The ID of the AWS account that owns the job.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (the user name) of the user who created the job.

" + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the job was created.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

A dataset that the job is to process.

" + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an encryption key that is used to protect a job.

" + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

The encryption mode for the job, which can be one of the following:

  • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

  • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

" + }, + "Name":{ + "shape":"JobName", + "documentation":"

The unique name of the job.

" + }, + "Type":{ + "shape":"JobType", + "documentation":"

The job type of the job, which must be one of the following:

  • PROFILE - A job to analyze a dataset, to determine its size, data types, data distribution, and more.

  • RECIPE - A job to apply one or more transformations to a dataset.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (the user name) of the user who last modified the job.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The modification date and time of the job.

" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

The current status of Amazon CloudWatch logging for the job.

" + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

The maximum number of nodes that can be consumed when the job processes data.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry the job after a job run fails.

" + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

One or more artifacts that represent output from running the job.

" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that the job is associated with.

" + }, + "RecipeReference":{ + "shape":"RecipeReference", + "documentation":"

A set of steps that the job runs.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The unique Amazon Resource Name (ARN) for the job.

" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the role that will be assumed for this job.

" + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags that have been applied to the job.

" + } + }, + "documentation":"

Represents all of the attributes of an AWS Glue DataBrew job.

" + }, + "JobList":{ + "type":"list", + "member":{"shape":"Job"} + }, + "JobName":{ + "type":"string", + "max":240, + "min":1 + }, + "JobNameList":{ + "type":"list", + "member":{"shape":"JobName"}, + "max":50 + }, + "JobRun":{ + "type":"structure", + "members":{ + "Attempt":{ + "shape":"Attempt", + "documentation":"

The number of times that DataBrew has attempted to run the job.

" + }, + "CompletedOn":{ + "shape":"Date", + "documentation":"

The date and time when the job completed processing.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset for the job to process.

" + }, + "ErrorMessage":{ + "shape":"JobRunErrorMessage", + "documentation":"

A message indicating an error (if any) that was encountered when the job ran.

" + }, + "ExecutionTime":{ + "shape":"ExecutionTime", + "documentation":"

The amount of time, in seconds, during which a job run consumed resources.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name of the job being processed during this run.

" + }, + "RunId":{ + "shape":"JobRunId", + "documentation":"

The unique identifier of the job run.

" + }, + "State":{ + "shape":"JobRunState", + "documentation":"

The current state of the job run entity itself.

" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

The current status of Amazon CloudWatch logging for the job run.

" + }, + "LogGroupName":{ + "shape":"LogGroupName", + "documentation":"

The name of an Amazon CloudWatch log group, where the job writes diagnostic messages when it runs.

" + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

One or more output artifacts from a job run.

" + }, + "RecipeReference":{ + "shape":"RecipeReference", + "documentation":"

The set of steps processed by the job.

" + }, + "StartedBy":{ + "shape":"StartedBy", + "documentation":"

The identifier (the user name) of the user who initiated the job run.

" + }, + "StartedOn":{ + "shape":"Date", + "documentation":"

The date and time when the job run began.

" + } + }, + "documentation":"

Represents one run of an AWS Glue DataBrew job.

" + }, + "JobRunErrorMessage":{"type":"string"}, + "JobRunId":{ + "type":"string", + "max":255, + "min":1 + }, + "JobRunList":{ + "type":"list", + "member":{"shape":"JobRun"} + }, + "JobRunState":{ + "type":"string", + "enum":[ + "STARTING", + "RUNNING", + "STOPPING", + "STOPPED", + "SUCCEEDED", + "FAILED", + "TIMEOUT" + ] + }, + "JobType":{ + "type":"string", + "enum":[ + "PROFILE", + "RECIPE" + ] + }, + "JsonOptions":{ + "type":"structure", + "members":{ + "MultiLine":{ + "shape":"MultiLine", + "documentation":"

A value that specifies whether JSON input contains embedded new line characters.

" + } + }, + "documentation":"

Represents the JSON-specific options that define how input is to be interpreted by AWS Glue DataBrew.

" + }, + "Key":{ + "type":"string", + "max":1280, + "min":1 + }, + "LastModifiedBy":{"type":"string"}, + "ListDatasetsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "required":["Datasets"], + "members":{ + "Datasets":{ + "shape":"DatasetList", + "documentation":"

A list of datasets that are defined in the current AWS account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.
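
Illustrative only: a minimal pagination loop over ListDatasets, assuming the client and model classes generated from this model by the AWS SDK for Java v2 (the class and method names follow the SDK's codegen conventions and are not taken from this diff).

// Hypothetical sketch: page through all datasets by chaining NextToken values.
import software.amazon.awssdk.services.databrew.DataBrewClient;
import software.amazon.awssdk.services.databrew.model.ListDatasetsRequest;
import software.amazon.awssdk.services.databrew.model.ListDatasetsResponse;

public class ListAllDatasets {
    public static void main(String[] args) {
        DataBrewClient databrew = DataBrewClient.create();
        String nextToken = null;
        do {
            ListDatasetsResponse page = databrew.listDatasets(ListDatasetsRequest.builder()
                    .maxResults(100)          // MaxResults100: between 1 and 100
                    .nextToken(nextToken)     // null on the first call
                    .build());
            page.datasets().forEach(d -> System.out.println(d.name()));
            nextToken = page.nextToken();     // absent when there are no more pages
        } while (nextToken != null);
    }
}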

" + } + } + }, + "ListJobRunsRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job.

", + "location":"uri", + "locationName":"name" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by AWS Glue DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListJobRunsResponse":{ + "type":"structure", + "required":["JobRuns"], + "members":{ + "JobRuns":{ + "shape":"JobRunList", + "documentation":"

A list of job runs that have occurred for the specified job.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, + "ListJobsRequest":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of a dataset. Using this parameter indicates to return only those jobs that act on the specified dataset.

", + "location":"querystring", + "locationName":"datasetName" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

", + "location":"querystring", + "locationName":"nextToken" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of a project. Using this parameter indicates to return only those jobs that are associated with the specified project.

", + "location":"querystring", + "locationName":"projectName" + } + } + }, + "ListJobsResponse":{ + "type":"structure", + "required":["Jobs"], + "members":{ + "Jobs":{ + "shape":"JobList", + "documentation":"

A list of jobs that are defined in the current AWS account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, + "ListProjectsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that can be used in a subsequent request.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectsResponse":{ + "type":"structure", + "required":["Projects"], + "members":{ + "Projects":{ + "shape":"ProjectList", + "documentation":"

A list of projects that are defined in the current AWS account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

" + } + } + }, + "ListRecipeVersionsRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that can be used in a subsequent request.

", + "location":"querystring", + "locationName":"nextToken" + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe for which to return version information.

", + "location":"querystring", + "locationName":"name" + } + } + }, + "ListRecipeVersionsResponse":{ + "type":"structure", + "required":["Recipes"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

" + }, + "Recipes":{ + "shape":"RecipeList", + "documentation":"

A list of versions for the specified recipe.

" + } + } + }, + "ListRecipesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that can be used in a subsequent request.

", + "location":"querystring", + "locationName":"nextToken" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

A version identifier. Using this parameter indicates to return only those recipes that have this version identifier.

", + "location":"querystring", + "locationName":"recipeVersion" + } + } + }, + "ListRecipesResponse":{ + "type":"structure", + "required":["Recipes"], + "members":{ + "Recipes":{ + "shape":"RecipeList", + "documentation":"

A list of recipes that are defined in the current AWS account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

" + } + } + }, + "ListSchedulesRequest":{ + "type":"structure", + "members":{ + "JobName":{ + "shape":"JobName", + "documentation":"

The name of the job that these schedules apply to.

", + "location":"querystring", + "locationName":"jobName" + }, + "MaxResults":{ + "shape":"MaxResults100", + "documentation":"

The maximum number of results to return in this request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that can be used in a subsequent request.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSchedulesResponse":{ + "type":"structure", + "required":["Schedules"], + "members":{ + "Schedules":{ + "shape":"ScheduleList", + "documentation":"

A list of schedules in the current AWS account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token generated by DataBrew that specifies where to continue pagination if a previous request was truncated. To get the next set of pages, pass in the NextToken value from the response object of the previous page call.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the DataBrew resource.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

A list of tags associated with the DataBrew resource.

" + } + } + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1 + }, + "LogSubscription":{ + "type":"string", + "enum":[ + "ENABLE", + "DISABLE" + ] + }, + "MaxCapacity":{"type":"integer"}, + "MaxResults100":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxRetries":{ + "type":"integer", + "min":0 + }, + "Message":{"type":"string"}, + "MultiLine":{"type":"boolean"}, + "NextToken":{ + "type":"string", + "max":2000, + "min":1 + }, + "OpenedBy":{"type":"string"}, + "Operation":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Z\\_]+$" + }, + "Output":{ + "type":"structure", + "required":["Location"], + "members":{ + "CompressionFormat":{ + "shape":"CompressionFormat", + "documentation":"

The compression algorithm used to compress the output text of the job.

" + }, + "Format":{ + "shape":"OutputFormat", + "documentation":"

The data format of the output of the job.

" + }, + "PartitionColumns":{ + "shape":"ColumnNameList", + "documentation":"

The names of one or more partition columns for the output of the job.

" + }, + "Location":{ + "shape":"S3Location", + "documentation":"

The location in Amazon S3 where the job writes its output.

" + }, + "Overwrite":{ + "shape":"OverwriteOutput", + "documentation":"

A value that, if true, means that any data in the location specified for output is overwritten with new output.

" + } + }, + "documentation":"

Represents individual output from a particular job run.

" + }, + "OutputFormat":{ + "type":"string", + "enum":[ + "CSV", + "JSON", + "PARQUET", + "GLUEPARQUET", + "AVRO", + "ORC", + "XML" + ] + }, + "OutputList":{ + "type":"list", + "member":{"shape":"Output"}, + "min":1 + }, + "OverwriteOutput":{"type":"boolean"}, + "ParameterMap":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + }, + "ParameterName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9]+$" + }, + "ParameterValue":{ + "type":"string", + "max":8192, + "min":1 + }, + "Preview":{"type":"boolean"}, + "Project":{ + "type":"structure", + "required":[ + "Name", + "RecipeName" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The ID of the AWS account that owns the project.

" + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the project was created.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (the user name) of the user who created the project.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The dataset that the project is to act upon.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The last modification date and time for the project.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (user name) of the user who last modified the project.

" + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

The unique name of a project.

" + }, + "RecipeName":{ + "shape":"RecipeName", + "documentation":"

The name of a recipe that will be developed during a project session.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the project.

" + }, + "Sample":{ + "shape":"Sample", + "documentation":"

The sample size and sampling type to apply to the data. If this parameter isn't specified, then the sample will consist of the first 500 rows from the dataset.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags that have been applied to the project.

" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the role that will be assumed for this project.

" + }, + "OpenedBy":{ + "shape":"OpenedBy", + "documentation":"

The identifier (the user name) of the user that opened the project for use.

" + }, + "OpenDate":{ + "shape":"Date", + "documentation":"

The date and time when the project was opened.

" + } + }, + "documentation":"

Represents all of the attributes of an AWS Glue DataBrew project.

" + }, + "ProjectList":{ + "type":"list", + "member":{"shape":"Project"} + }, + "ProjectName":{ + "type":"string", + "max":255, + "min":1 + }, + "PublishRecipeRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Description":{ + "shape":"RecipeDescription", + "documentation":"

A description of the recipe to be published, for this version of the recipe.

" + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe to be published.

", + "location":"uri", + "locationName":"name" + } + } + }, + "PublishRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe that you published.

" + } + } + }, + "PublishedBy":{"type":"string"}, + "Recipe":{ + "type":"structure", + "required":["Name"], + "members":{ + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (the user name) of the user who created the recipe.

" + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the recipe was created.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (user name) of the user who last modified the recipe.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The last modification date and time of the recipe.

" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that the recipe is associated with.

" + }, + "PublishedBy":{ + "shape":"PublishedBy", + "documentation":"

The identifier (the user name) of the user who published the recipe.

" + }, + "PublishedDate":{ + "shape":"Date", + "documentation":"

The date and time when the recipe was published.

" + }, + "Description":{ + "shape":"RecipeDescription", + "documentation":"

The description of the recipe.

" + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

The unique name for the recipe.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the recipe.

" + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

A list of steps that are defined by the recipe.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags that have been applied to the recipe.

" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The identifier for the version for the recipe.

" + } + }, + "documentation":"

Represents one or more actions to be performed on an AWS Glue DataBrew dataset.

" + }, + "RecipeAction":{ + "type":"structure", + "required":["Operation"], + "members":{ + "Operation":{ + "shape":"Operation", + "documentation":"

The name of a valid DataBrew transformation to be performed on the data.

" + }, + "Parameters":{ + "shape":"ParameterMap", + "documentation":"

Contextual parameters for the transformation.

" + } + }, + "documentation":"

Represents a transformation and associated parameters that are used to apply a change to an AWS Glue DataBrew dataset. For more information, see Recipe structure and Recipe actions reference.

" + }, + "RecipeDescription":{ + "type":"string", + "max":1024 + }, + "RecipeErrorList":{ + "type":"list", + "member":{"shape":"RecipeVersionErrorDetail"} + }, + "RecipeErrorMessage":{"type":"string"}, + "RecipeList":{ + "type":"list", + "member":{"shape":"Recipe"} + }, + "RecipeName":{ + "type":"string", + "max":255, + "min":1 + }, + "RecipeReference":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe.

" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The identifier for the version for the recipe.

" + } + }, + "documentation":"

Represents all of the attributes of an AWS Glue DataBrew recipe.

" + }, + "RecipeStep":{ + "type":"structure", + "required":["Action"], + "members":{ + "Action":{ + "shape":"RecipeAction", + "documentation":"

The particular action to be performed in the recipe step.

" + }, + "ConditionExpressions":{ + "shape":"ConditionExpressionList", + "documentation":"

One or more conditions that must be met, in order for the recipe step to succeed.

All of the conditions in the array must be met. In other words, all of the conditions must be combined using a logical AND operation.

" + } + }, + "documentation":"

Represents a single step to be performed in an AWS Glue DataBrew recipe.

" + }, + "RecipeStepList":{ + "type":"list", + "member":{"shape":"RecipeStep"} + }, + "RecipeVersion":{ + "type":"string", + "max":16, + "min":1 + }, + "RecipeVersionErrorDetail":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

The HTTP status code for the error.

" + }, + "ErrorMessage":{ + "shape":"RecipeErrorMessage", + "documentation":"

The text of the error message.

" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The identifier for the recipe version associated with this error.

" + } + }, + "documentation":"

Represents any errors encountered when attempting to delete multiple recipe versions.

" + }, + "RecipeVersionList":{ + "type":"list", + "member":{"shape":"RecipeVersion"}, + "max":50, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

One or more resources can't be found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "Result":{"type":"string"}, + "S3Location":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"Bucket", + "documentation":"

The S3 bucket name.

" + }, + "Key":{ + "shape":"Key", + "documentation":"

The unique name of the object in the bucket.

" + } + }, + "documentation":"

An Amazon S3 location (bucket name and object key) where DataBrew can read input data, or write output from a job.

" + }, + "Sample":{ + "type":"structure", + "required":["Type"], + "members":{ + "Size":{ + "shape":"SampleSize", + "documentation":"

The number of rows in the sample.

" + }, + "Type":{ + "shape":"SampleType", + "documentation":"

The way in which DataBrew obtains rows from a dataset.

" + } + }, + "documentation":"

Represents the sample size and sampling type for AWS Glue DataBrew to use for interactive data analysis.

" + }, + "SampleSize":{ + "type":"integer", + "max":5000, + "min":1 + }, + "SampleType":{ + "type":"string", + "enum":[ + "FIRST_N", + "LAST_N", + "RANDOM" + ] + }, + "Schedule":{ + "type":"structure", + "required":["Name"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The ID of the AWS account that owns the schedule.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The identifier (the user name) of the user who created the schedule.

" + }, + "CreateDate":{ + "shape":"Date", + "documentation":"

The date and time that the schedule was created.

" + }, + "JobNames":{ + "shape":"JobNameList", + "documentation":"

A list of jobs to be run, according to the schedule.

" + }, + "LastModifiedBy":{ + "shape":"LastModifiedBy", + "documentation":"

The identifier (the user name) of the user who last modified the schedule.

" + }, + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The date and time when the schedule was last modified.

" + }, + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the schedule.

" + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

The date(s) and time(s), in cron format, when the job will run.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Metadata tags that have been applied to the schedule.

" + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule.

" + } + }, + "documentation":"

Represents one or more dates and times when a job is to run.

" + }, + "ScheduleList":{ + "type":"list", + "member":{"shape":"Schedule"} + }, + "ScheduleName":{ + "type":"string", + "max":255, + "min":1 + }, + "SendProjectSessionActionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Preview":{ + "shape":"Preview", + "documentation":"

Returns the result of the recipe step, without applying it. The result isn't added to the view frame stack.

" + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project to apply the action to.

", + "location":"uri", + "locationName":"name" + }, + "RecipeStep":{"shape":"RecipeStep"}, + "StepIndex":{ + "shape":"StepIndex", + "documentation":"

The index from which to preview a step. This index is used to preview the result of steps that have already been applied, so that the resulting view frame is from earlier in the view frame stack.

" + }, + "ClientSessionId":{ + "shape":"ClientSessionId", + "documentation":"

A unique identifier for an interactive session that's currently open and ready for work. The action will be performed on this session.

" + }, + "ViewFrame":{"shape":"ViewFrame"} + } + }, + "SendProjectSessionActionResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Result":{ + "shape":"Result", + "documentation":"

A message indicating the result of performing the action.

" + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project that was affected by the action.

" + }, + "ActionId":{ + "shape":"ActionId", + "documentation":"

A unique identifier for the action that was performed.

" + } + } + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

A service quota is exceeded.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SessionStatus":{ + "type":"string", + "enum":[ + "ASSIGNED", + "FAILED", + "INITIALIZING", + "PROVISIONING", + "READY", + "RECYCLING", + "ROTATING", + "TERMINATED", + "TERMINATING", + "UPDATING" + ] + }, + "SheetIndex":{ + "type":"integer", + "max":200, + "min":0 + }, + "SheetIndexList":{ + "type":"list", + "member":{"shape":"SheetIndex"}, + "max":1, + "min":1 + }, + "SheetName":{ + "type":"string", + "max":31, + "min":1 + }, + "SheetNameList":{ + "type":"list", + "member":{"shape":"SheetName"}, + "max":1, + "min":1 + }, + "Source":{ + "type":"string", + "enum":[ + "S3", + "DATA-CATALOG" + ] + }, + "StartColumnIndex":{ + "type":"integer", + "min":0 + }, + "StartJobRunRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job to be run.

", + "location":"uri", + "locationName":"name" + } + } + }, + "StartJobRunResponse":{ + "type":"structure", + "required":["RunId"], + "members":{ + "RunId":{ + "shape":"JobRunId", + "documentation":"

A system-generated identifier for this particular job run.
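
Illustrative only: the RunId returned here is the same identifier that StopJobRunRequest expects alongside the job name. A minimal sketch, again assuming the generated AWS SDK for Java v2 client (the job name is a placeholder).

// Hypothetical sketch: start a job run, then stop that same run by name and run ID.
import software.amazon.awssdk.services.databrew.DataBrewClient;
import software.amazon.awssdk.services.databrew.model.StartJobRunRequest;
import software.amazon.awssdk.services.databrew.model.StopJobRunRequest;

public class RunAndCancelJob {
    public static void main(String[] args) {
        DataBrewClient databrew = DataBrewClient.create();
        String runId = databrew.startJobRun(
                StartJobRunRequest.builder().name("my-recipe-job").build()).runId();
        databrew.stopJobRun(
                StopJobRunRequest.builder().name("my-recipe-job").runId(runId).build());
    }
}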

" + } + } + }, + "StartProjectSessionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project to act upon.

", + "location":"uri", + "locationName":"name" + }, + "AssumeControl":{ + "shape":"AssumeControl", + "documentation":"

A value that, if true, enables you to take control of a session, even if a different client is currently accessing the project.

" + } + } + }, + "StartProjectSessionResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project to be acted upon.

" + }, + "ClientSessionId":{ + "shape":"ClientSessionId", + "documentation":"

A system-generated identifier for the session.

" + } + } + }, + "StartedBy":{"type":"string"}, + "StepIndex":{ + "type":"integer", + "min":0 + }, + "StopJobRunRequest":{ + "type":"structure", + "required":[ + "Name", + "RunId" + ], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job to be stopped.

", + "location":"uri", + "locationName":"name" + }, + "RunId":{ + "shape":"JobRunId", + "documentation":"

The ID of the job run to be stopped.

", + "location":"uri", + "locationName":"runId" + } + } + }, + "StopJobRunResponse":{ + "type":"structure", + "required":["RunId"], + "members":{ + "RunId":{ + "shape":"JobRunId", + "documentation":"

The ID of the job run that you stopped.

" + } + } + }, + "TableName":{ + "type":"string", + "max":255, + "min":1 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The DataBrew resource to which tags should be added. The value for this parameter is an Amazon Resource Name (ARN). For DataBrew, you can tag a dataset, a job, a project, or a recipe.
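
Illustrative only: a minimal tagging sketch using the generated AWS SDK for Java v2 client; the recipe ARN is a placeholder, and its exact format should be taken from the service documentation rather than from this example.

// Hypothetical sketch: attach tags to a DataBrew recipe by ARN.
import java.util.Map;
import software.amazon.awssdk.services.databrew.DataBrewClient;
import software.amazon.awssdk.services.databrew.model.TagResourceRequest;

public class TagRecipe {
    public static void main(String[] args) {
        DataBrewClient databrew = DataBrewClient.create();
        databrew.tagResource(TagResourceRequest.builder()
                .resourceArn("arn:aws:databrew:us-east-1:123456789012:recipe/my-recipe") // placeholder ARN
                .tags(Map.of("team", "analytics", "env", "dev"))                         // TagMap: 1-200 entries
                .build());
    }
}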

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags to be assigned to the resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "TargetColumn":{ + "type":"string", + "max":1024, + "min":1 + }, + "Timeout":{ + "type":"integer", + "min":0 + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

A DataBrew resource from which you want to remove a tag or tags. The value for this parameter is an Amazon Resource Name (ARN).

", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys (names) of one or more tags to be removed.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDatasetRequest":{ + "type":"structure", + "required":[ + "Name", + "Input" + ], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset to be updated.

", + "location":"uri", + "locationName":"name" + }, + "FormatOptions":{"shape":"FormatOptions"}, + "Input":{"shape":"Input"} + } + }, + "UpdateDatasetResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset that you updated.

" + } + } + }, + "UpdateProfileJobRequest":{ + "type":"structure", + "required":[ + "Name", + "OutputLocation", + "RoleArn" + ], + "members":{ + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

" + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

The encryption mode for the job, which can be one of the following:

  • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

  • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

" + }, + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job to be updated.

", + "location":"uri", + "locationName":"name" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

" + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

The maximum number of nodes that DataBrew can use when the job processes data.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry the job after a job run fails.

" + }, + "OutputLocation":{"shape":"S3Location"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

" + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + } + } + }, + "UpdateProfileJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job that was updated.

" + } + } + }, + "UpdateProjectRequest":{ + "type":"structure", + "required":[ + "RoleArn", + "Name" + ], + "members":{ + "Sample":{"shape":"Sample"}, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed for this request.

" + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project to be updated.

", + "location":"uri", + "locationName":"name" + } + } + }, + "UpdateProjectResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "LastModifiedDate":{ + "shape":"Date", + "documentation":"

The date and time that the project was last modified.

" + }, + "Name":{ + "shape":"ProjectName", + "documentation":"

The name of the project that you updated.

" + } + } + }, + "UpdateRecipeJobRequest":{ + "type":"structure", + "required":[ + "Name", + "Outputs", + "RoleArn" + ], + "members":{ + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of an encryption key that is used to protect the job.

" + }, + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

The encryption mode for the job, which can be one of the following:

  • SSE-KMS - Server-side encryption with AWS KMS-managed keys.

  • SSE-S3 - Server-side encryption with keys managed by Amazon S3.

" + }, + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job to update.

", + "location":"uri", + "locationName":"name" + }, + "LogSubscription":{ + "shape":"LogSubscription", + "documentation":"

A value that enables or disables Amazon CloudWatch logging for the current AWS account. If logging is enabled, CloudWatch writes one log stream for each job run.

" + }, + "MaxCapacity":{ + "shape":"MaxCapacity", + "documentation":"

The maximum number of nodes that DataBrew can consume when the job processes data.

" + }, + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

The maximum number of times to retry the job after a job run fails.

" + }, + "Outputs":{ + "shape":"OutputList", + "documentation":"

One or more artifacts that represent the output from running the job.

" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be assumed for this request.

" + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

The job's timeout in minutes. A job that attempts to run longer than this timeout period ends with a status of TIMEOUT.

" + } + } + }, + "UpdateRecipeJobResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"JobName", + "documentation":"

The name of the job that you updated.

" + } + } + }, + "UpdateRecipeRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Description":{ + "shape":"RecipeDescription", + "documentation":"

A description of the recipe.

" + }, + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe to be updated.

", + "location":"uri", + "locationName":"name" + }, + "Steps":{ + "shape":"RecipeStepList", + "documentation":"

One or more steps to be performed by the recipe. Each step consists of an action, and the conditions under which the action should succeed.

" + } + } + }, + "UpdateRecipeResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"RecipeName", + "documentation":"

The name of the recipe that was updated.

" + } + } + }, + "UpdateScheduleRequest":{ + "type":"structure", + "required":[ + "CronExpression", + "Name" + ], + "members":{ + "JobNames":{ + "shape":"JobNameList", + "documentation":"

The name or names of one or more jobs to be run for this schedule.

" + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

The date or dates and time or times, in cron format, when the jobs are to be run.

" + }, + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule to update.

", + "location":"uri", + "locationName":"name" + } + } + }, + "UpdateScheduleResponse":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ScheduleName", + "documentation":"

The name of the schedule that was updated.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The input parameters for this request failed validation.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ViewFrame":{ + "type":"structure", + "required":["StartColumnIndex"], + "members":{ + "StartColumnIndex":{ + "shape":"StartColumnIndex", + "documentation":"

The starting index for the range of columns to return in the view frame.

" + }, + "ColumnRange":{ + "shape":"ColumnRange", + "documentation":"

The number of columns to include in the view frame, beginning with the StartColumnIndex value and ignoring any columns in the HiddenColumns list.

" + }, + "HiddenColumns":{ + "shape":"HiddenColumnList", + "documentation":"

A list of columns to hide in the view frame.

" + } + }, + "documentation":"

Represents the data being transformed during an AWS Glue DataBrew project session.

" + } + }, + "documentation":"

AWS Glue DataBrew is a visual, cloud-scale data-preparation service. DataBrew simplifies data preparation tasks, targeting data issues that are hard to spot and time-consuming to fix. DataBrew empowers users of all technical levels to visualize the data and perform one-click data transformations, with no coding required.

" +} diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index f1ed1a5f15fc..eed4708da8cf 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 1ba202c4ead3..2611e46988b8 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 424b2c74ac62..975794aec362 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/datasync/src/main/resources/codegen-resources/service-2.json b/services/datasync/src/main/resources/codegen-resources/service-2.json index cab337102dd3..bfbe17ba627d 100644 --- a/services/datasync/src/main/resources/codegen-resources/service-2.json +++ b/services/datasync/src/main/resources/codegen-resources/service-2.json @@ -447,6 +447,20 @@ {"shape":"InternalException"} ], "documentation":"

Updates the metadata associated with a task.

" + }, + "UpdateTaskExecution":{ + "name":"UpdateTaskExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTaskExecutionRequest"}, + "output":{"shape":"UpdateTaskExecutionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Updates execution of a task.

You can modify bandwidth throttling for a task execution that is running or queued. For more information, see Adjusting Bandwidth Throttling for a Task Execution.

The only Option that can be modified by UpdateTaskExecution is BytesPerSecond.

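As a hedged sketch of how the new UpdateTaskExecution operation could be invoked through the generated DataSync client, assuming the standard SDK v2 builder pattern for the shapes added below (UpdateTaskExecutionRequest with TaskExecutionArn and Options); the task execution ARN and throttle value are placeholders.

import software.amazon.awssdk.services.datasync.DataSyncClient;
import software.amazon.awssdk.services.datasync.model.Options;
import software.amazon.awssdk.services.datasync.model.UpdateTaskExecutionRequest;

public class UpdateTaskExecutionSketch {
    public static void main(String[] args) {
        try (DataSyncClient dataSync = DataSyncClient.create()) {
            UpdateTaskExecutionRequest request = UpdateTaskExecutionRequest.builder()
                    // Placeholder ARN of the running or queued task execution to adjust.
                    .taskExecutionArn("arn:aws:datasync:us-east-1:111122223333:task/task-EXAMPLE/execution/exec-EXAMPLE")
                    // Per the documentation above, only BytesPerSecond may be modified here.
                    .options(Options.builder()
                            .bytesPerSecond(1_048_576L)   // throttle to roughly 1 MiB/s (placeholder)
                            .build())
                    .build();
            dataSync.updateTaskExecution(request);
        }
    }
}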
" } }, "shapes":{ @@ -657,7 +671,7 @@ "members":{ "Subdirectory":{ "shape":"NfsSubdirectory", - "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server. run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" + "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" }, "ServerHostname":{ "shape":"ServerHostname", @@ -758,16 +772,16 @@ }, "S3BucketArn":{ "shape":"S3BucketArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket. If the bucket is on an AWS Outpost, this must be an access point ARN.

" + "documentation":"

The ARN of the Amazon S3 bucket. If the bucket is on an AWS Outpost, this must be an access point ARN.

" }, "S3StorageClass":{ "shape":"S3StorageClass", - "documentation":"

The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. For buckets in AWS Regions, the storage class defaults to Standard. For buckets on AWS Outposts, the storage class defaults to AWS S3 Outposts.

For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" + "documentation":"

The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. For buckets in AWS Regions, the storage class defaults to Standard. For buckets on AWS Outposts, the storage class defaults to AWS S3 Outposts.

For more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" }, "S3Config":{"shape":"S3Config"}, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

If you are using DataSync on an AWS Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your AWS Outpost. For more information about launching a DataSync agent on an Amazon Outpost, see outposts-agent.

" + "documentation":"

If you are using DataSync on an AWS Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. For more information about launching a DataSync agent on an AWS Outpost, see outposts-agent.

" }, "Tags":{ "shape":"InputTagList", @@ -870,7 +884,7 @@ }, "Excludes":{ "shape":"FilterList", - "documentation":"

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"

" + "documentation":"

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

" }, "Schedule":{ "shape":"TaskSchedule", @@ -1158,12 +1172,12 @@ }, "S3StorageClass":{ "shape":"S3StorageClass", - "documentation":"

The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" + "documentation":"

The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" }, "S3Config":{"shape":"S3Config"}, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

If you are using DataSync on an Amazon Outpost, the Amazon Resource Name (ARNs) of the EC2 agents deployed on your AWS Outpost. For more information about launching a DataSync agent on an Amazon Outpost, see outposts-agent.

" + "documentation":"

If you are using DataSync on an AWS Outpost, the Amazon Resource Names (ARNs) of the EC2 agents deployed on your Outpost. For more information about launching a DataSync agent on an AWS Outpost, see outposts-agent.

" }, "CreationTime":{ "shape":"Time", @@ -1299,7 +1313,7 @@ }, "Status":{ "shape":"TaskStatus", - "documentation":"

The status of the task that was described.

For detailed information about task execution statuses, see Understanding Task Statuses in the AWS DataSync User Guide.

" + "documentation":"

The status of the task that was described.

For detailed information about task execution statuses, see Understanding Task Statuses in the AWS DataSync User Guide.

" }, "Name":{ "shape":"TagValue", @@ -2205,7 +2219,7 @@ }, "TransferStatus":{ "shape":"PhaseStatus", - "documentation":"

The status of the TRANSFERRING Phase.

" + "documentation":"

The status of the TRANSFERRING phase.

" }, "VerifyDuration":{ "shape":"Duration", @@ -2213,7 +2227,7 @@ }, "VerifyStatus":{ "shape":"PhaseStatus", - "documentation":"

The status of the VERIFYING Phase.

" + "documentation":"

The status of the VERIFYING phase.

" }, "ErrorCode":{ "shape":"string", @@ -2382,6 +2396,25 @@ "members":{ } }, + "UpdateTaskExecutionRequest":{ + "type":"structure", + "required":[ + "TaskExecutionArn", + "Options" + ], + "members":{ + "TaskExecutionArn":{ + "shape":"TaskExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the specific task execution that is being updated.

" + }, + "Options":{"shape":"Options"} + } + }, + "UpdateTaskExecutionResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateTaskRequest":{ "type":"structure", "required":["TaskArn"], diff --git a/services/dax/pom.xml b/services/dax/pom.xml index d902de431384..e09af90c29ef 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index eb048b3a5278..2469f52a380b 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 38cc5500622b..b8b936f74573 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml new file mode 100644 index 000000000000..05416cd8ecff --- /dev/null +++ b/services/devopsguru/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + devopsguru + AWS Java SDK :: Services :: Dev Ops Guru + The AWS Java SDK for Dev Ops Guru module holds the client classes that are used for + communicating with Dev Ops Guru. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.devopsguru + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json b/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..0360337d2f15 --- /dev/null +++ b/services/devopsguru/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,64 @@ +{ + "pagination": { + "DescribeResourceCollectionHealth": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": [ + "CloudFormation" + ] + }, + "GetResourceCollection": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": [ + "ResourceCollection.CloudFormation.StackNames" + ], + "non_aggregate_keys": [ + "ResourceCollection" + ] + }, + "ListAnomaliesForInsight": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": [ + "ReactiveAnomalies", + "ProactiveAnomalies" + ] + }, + "ListEvents": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Events" + }, + "ListInsights": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": [ + "ProactiveInsights", + "ReactiveInsights" + ] + }, + "ListNotificationChannels": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Channels" + }, + "ListRecommendations": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Recommendations" + }, + "SearchInsights": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": [ + "ProactiveInsights", + "ReactiveInsights" + ] + } + } +} \ No 
newline at end of file diff --git a/services/devopsguru/src/main/resources/codegen-resources/service-2.json b/services/devopsguru/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..332959980326 --- /dev/null +++ b/services/devopsguru/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1951 @@ +{ + "metadata" : { + "apiVersion" : "2020-12-01", + "endpointPrefix" : "devops-guru", + "signingName" : "devops-guru", + "serviceFullName" : "Amazon DevOps Guru", + "serviceId" : "DevOps Guru", + "protocol" : "rest-json", + "jsonVersion" : "1.1", + "uid" : "devops-guru-2020-12-01", + "signatureVersion" : "v4" + }, + "operations" : { + "AddNotificationChannel" : { + "name" : "AddNotificationChannel", + "http" : { + "method" : "PUT", + "requestUri" : "/channels", + "responseCode" : 200 + }, + "input" : { + "shape" : "AddNotificationChannelRequest" + }, + "output" : { + "shape" : "AddNotificationChannelResponse" + }, + "errors" : [ { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "ServiceQuotaExceededException" + }, { + "shape" : "AccessDeniedException" + }, { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ConflictException" + } ] + }, + "DescribeAccountHealth" : { + "name" : "DescribeAccountHealth", + "http" : { + "method" : "GET", + "requestUri" : "/accounts/health", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeAccountHealthRequest" + }, + "output" : { + "shape" : "DescribeAccountHealthResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "DescribeAccountOverview" : { + "name" : "DescribeAccountOverview", + "http" : { + "method" : "POST", + "requestUri" : "/accounts/overview", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeAccountOverviewRequest" + }, + "output" : { + "shape" : "DescribeAccountOverviewResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "DescribeAnomaly" : { + "name" : "DescribeAnomaly", + "http" : { + "method" : "GET", + "requestUri" : "/anomalies/{Id}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeAnomalyRequest" + }, + "output" : { + "shape" : "DescribeAnomalyResponse" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "DescribeInsight" : { + "name" : "DescribeInsight", + "http" : { + "method" : "GET", + "requestUri" : "/insights/{Id}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeInsightRequest" + }, + "output" : { + "shape" : "DescribeInsightResponse" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "DescribeResourceCollectionHealth" : { + "name" : "DescribeResourceCollectionHealth", + "http" : { + "method" : "GET", + "requestUri" : "/accounts/health/resource-collection/{ResourceCollectionType}", + "responseCode" : 200 + }, + "input" : { + "shape" : 
"DescribeResourceCollectionHealthRequest" + }, + "output" : { + "shape" : "DescribeResourceCollectionHealthResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "DescribeServiceIntegration" : { + "name" : "DescribeServiceIntegration", + "http" : { + "method" : "GET", + "requestUri" : "/service-integrations", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeServiceIntegrationRequest" + }, + "output" : { + "shape" : "DescribeServiceIntegrationResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "GetResourceCollection" : { + "name" : "GetResourceCollection", + "http" : { + "method" : "GET", + "requestUri" : "/resource-collections/{ResourceCollectionType}", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetResourceCollectionRequest" + }, + "output" : { + "shape" : "GetResourceCollectionResponse" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "ListAnomaliesForInsight" : { + "name" : "ListAnomaliesForInsight", + "http" : { + "method" : "POST", + "requestUri" : "/anomalies/insight/{InsightId}", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListAnomaliesForInsightRequest" + }, + "output" : { + "shape" : "ListAnomaliesForInsightResponse" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "ListEvents" : { + "name" : "ListEvents", + "http" : { + "method" : "POST", + "requestUri" : "/events", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListEventsRequest" + }, + "output" : { + "shape" : "ListEventsResponse" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "ListInsights" : { + "name" : "ListInsights", + "http" : { + "method" : "POST", + "requestUri" : "/insights", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListInsightsRequest" + }, + "output" : { + "shape" : "ListInsightsResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "ListNotificationChannels" : { + "name" : "ListNotificationChannels", + "http" : { + "method" : "POST", + "requestUri" : "/channels", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListNotificationChannelsRequest" + }, + "output" : { + "shape" : "ListNotificationChannelsResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "ListRecommendations" : { + "name" : "ListRecommendations", + "http" : { + "method" : "POST", + "requestUri" : "/recommendations", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListRecommendationsRequest" + }, + 
"output" : { + "shape" : "ListRecommendationsResponse" + }, + "errors" : [ { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "PutFeedback" : { + "name" : "PutFeedback", + "http" : { + "method" : "PUT", + "requestUri" : "/feedback", + "responseCode" : 200 + }, + "input" : { + "shape" : "PutFeedbackRequest" + }, + "output" : { + "shape" : "PutFeedbackResponse" + }, + "errors" : [ { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + }, { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ConflictException" + } ] + }, + "RemoveNotificationChannel" : { + "name" : "RemoveNotificationChannel", + "http" : { + "method" : "DELETE", + "requestUri" : "/channels/{Id}", + "responseCode" : 200 + }, + "input" : { + "shape" : "RemoveNotificationChannelRequest" + }, + "output" : { + "shape" : "RemoveNotificationChannelResponse" + }, + "errors" : [ { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + }, { + "shape" : "ResourceNotFoundException" + }, { + "shape" : "ThrottlingException" + }, { + "shape" : "ConflictException" + } ] + }, + "SearchInsights" : { + "name" : "SearchInsights", + "http" : { + "method" : "POST", + "requestUri" : "/insights/search", + "responseCode" : 200 + }, + "input" : { + "shape" : "SearchInsightsRequest" + }, + "output" : { + "shape" : "SearchInsightsResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "UpdateResourceCollection" : { + "name" : "UpdateResourceCollection", + "http" : { + "method" : "PUT", + "requestUri" : "/resource-collections", + "responseCode" : 200 + }, + "input" : { + "shape" : "UpdateResourceCollectionRequest" + }, + "output" : { + "shape" : "UpdateResourceCollectionResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "ConflictException" + }, { + "shape" : "AccessDeniedException" + } ] + }, + "UpdateServiceIntegration" : { + "name" : "UpdateServiceIntegration", + "http" : { + "method" : "PUT", + "requestUri" : "/service-integrations", + "responseCode" : 200 + }, + "input" : { + "shape" : "UpdateServiceIntegrationRequest" + }, + "output" : { + "shape" : "UpdateServiceIntegrationResponse" + }, + "errors" : [ { + "shape" : "ThrottlingException" + }, { + "shape" : "ValidationException" + }, { + "shape" : "InternalServerException" + }, { + "shape" : "ConflictException" + }, { + "shape" : "AccessDeniedException" + } ] + } + }, + "shapes" : { + "AccessDeniedException" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string" + } + }, + "required" : [ "Message" ], + "exception" : true, + "error" : { + "httpStatusCode" : 403 + } + }, + "AddNotificationChannelRequest" : { + "type" : "structure", + "members" : { + "Config" : { + "shape" : "NotificationChannelConfig" + } + }, + "required" : [ "Config" ] + }, + "AddNotificationChannelResponse" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + } + }, + "AnomalySeverity" : { + "type" : 
"string", + "enum" : [ "LOW", "MEDIUM", "HIGH" ] + }, + "AnomalySourceDetails" : { + "type" : "structure", + "members" : { + "CloudWatchMetrics" : { + "shape" : "__listOfCloudWatchMetricsDetail" + } + } + }, + "AnomalyStatus" : { + "type" : "string", + "enum" : [ "ONGOING", "CLOSED" ] + }, + "AnomalyTimeRange" : { + "type" : "structure", + "members" : { + "EndTime" : { + "shape" : "__timestampUnix" + }, + "StartTime" : { + "shape" : "__timestampUnix" + } + }, + "required" : [ "StartTime" ] + }, + "CloudFormationCollection" : { + "type" : "structure", + "members" : { + "StackNames" : { + "shape" : "__listOf__stringMin1Max128PatternAZAZAZAZ09" + } + } + }, + "CloudFormationCollectionFilter" : { + "type" : "structure", + "members" : { + "StackNames" : { + "shape" : "__listOf__stringMin1Max128PatternAZAZAZAZ09" + } + } + }, + "CloudFormationHealth" : { + "type" : "structure", + "members" : { + "Insight" : { + "shape" : "InsightHealth" + }, + "StackName" : { + "shape" : "__stringMin1Max128PatternAZAZAZAZ09" + } + } + }, + "CloudWatchMetricsDetail" : { + "type" : "structure", + "members" : { + "Dimensions" : { + "shape" : "__listOfCloudWatchMetricsDimension" + }, + "MetricName" : { + "shape" : "__string" + }, + "Namespace" : { + "shape" : "__string" + }, + "Period" : { + "shape" : "__integer" + }, + "Stat" : { + "shape" : "CloudWatchMetricsStat" + }, + "Unit" : { + "shape" : "__string" + } + } + }, + "CloudWatchMetricsDimension" : { + "type" : "structure", + "members" : { + "Name" : { + "shape" : "__string" + }, + "Value" : { + "shape" : "__string" + } + } + }, + "CloudWatchMetricsStat" : { + "type" : "string", + "enum" : [ "Sum", "Average", "SampleCount", "Minimum", "Maximum", "p99", "p90", "p50" ] + }, + "ConflictException" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string" + }, + "ResourceId" : { + "shape" : "__string" + }, + "ResourceType" : { + "shape" : "__string" + } + }, + "required" : [ "Message", "ResourceId", "ResourceType" ], + "exception" : true, + "error" : { + "httpStatusCode" : 409 + } + }, + "DescribeAccountHealthRequest" : { + "type" : "structure", + "members" : { } + }, + "DescribeAccountHealthResponse" : { + "type" : "structure", + "members" : { + "MetricsAnalyzed" : { + "shape" : "__integer" + }, + "OpenProactiveInsights" : { + "shape" : "__integer" + }, + "OpenReactiveInsights" : { + "shape" : "__integer" + } + } + }, + "DescribeAccountOverviewRequest" : { + "type" : "structure", + "members" : { + "FromTime" : { + "shape" : "__timestampUnix" + }, + "ToTime" : { + "shape" : "__timestampUnix" + } + }, + "required" : [ "FromTime" ] + }, + "DescribeAccountOverviewResponse" : { + "type" : "structure", + "members" : { + "MeanTimeToRecoverInMilliseconds" : { + "shape" : "__long" + }, + "ProactiveInsights" : { + "shape" : "__integer" + }, + "ReactiveInsights" : { + "shape" : "__integer" + } + } + }, + "DescribeAnomalyRequest" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "Id" + } + }, + "required" : [ "Id" ] + }, + "DescribeAnomalyResponse" : { + "type" : "structure", + "members" : { + "ProactiveAnomaly" : { + "shape" : "ProactiveAnomaly" + }, + "ReactiveAnomaly" : { + "shape" : "ReactiveAnomaly" + } + } + }, + "DescribeInsightRequest" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "Id" + } + }, + "required" : [ "Id" ] + }, + "DescribeInsightResponse" : { + "type" : "structure", + "members" : { + 
"ProactiveInsight" : { + "shape" : "ProactiveInsight" + }, + "ReactiveInsight" : { + "shape" : "ReactiveInsight" + } + } + }, + "DescribeResourceCollectionHealthRequest" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "NextToken" + }, + "ResourceCollectionType" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ResourceCollectionType" + } + }, + "required" : [ "ResourceCollectionType" ] + }, + "DescribeResourceCollectionHealthResponse" : { + "type" : "structure", + "members" : { + "CloudFormation" : { + "shape" : "__listOfCloudFormationHealth" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + } + }, + "DescribeServiceIntegrationRequest" : { + "type" : "structure", + "members" : { } + }, + "DescribeServiceIntegrationResponse" : { + "type" : "structure", + "members" : { + "ServiceIntegration" : { + "shape" : "ServiceIntegrationConfig" + } + } + }, + "EndTimeRange" : { + "type" : "structure", + "members" : { + "FromTime" : { + "shape" : "__timestampUnix" + }, + "ToTime" : { + "shape" : "__timestampUnix" + } + } + }, + "Event" : { + "type" : "structure", + "members" : { + "DataSource" : { + "shape" : "EventDataSource" + }, + "EventClass" : { + "shape" : "EventClass" + }, + "EventSource" : { + "shape" : "__stringMin10Max50PatternAZAZ09AmazonawsComAwsEvents" + }, + "Id" : { + "shape" : "__string" + }, + "Name" : { + "shape" : "__stringMin0Max50" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Resources" : { + "shape" : "__listOfEventResource" + }, + "Time" : { + "shape" : "__timestampUnix" + } + } + }, + "EventClass" : { + "type" : "string", + "enum" : [ "INFRASTRUCTURE", "DEPLOYMENT", "SECURITY_CHANGE", "CONFIG_CHANGE", "SCHEMA_CHANGE" ] + }, + "EventDataSource" : { + "type" : "string", + "enum" : [ "AWS_CLOUD_TRAIL", "AWS_CODE_DEPLOY" ] + }, + "EventResource" : { + "type" : "structure", + "members" : { + "Arn" : { + "shape" : "__stringMin36Max2048PatternArnAwsAZAZ09AZ09D12" + }, + "Name" : { + "shape" : "__stringMin0Max2048Pattern" + }, + "Type" : { + "shape" : "__stringMin0Max2048Pattern" + } + } + }, + "EventTimeRange" : { + "type" : "structure", + "members" : { + "FromTime" : { + "shape" : "__timestampUnix" + }, + "ToTime" : { + "shape" : "__timestampUnix" + } + }, + "required" : [ "ToTime", "FromTime" ] + }, + "GetResourceCollectionRequest" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "NextToken" + }, + "ResourceCollectionType" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "ResourceCollectionType" + } + }, + "required" : [ "ResourceCollectionType" ] + }, + "GetResourceCollectionResponse" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "ResourceCollection" : { + "shape" : "ResourceCollectionFilter" + } + } + }, + "InsightFeedback" : { + "type" : "structure", + "members" : { + "Feedback" : { + "shape" : "InsightFeedbackOption" + }, + "Id" : { + "shape" : "__stringMin1Max100PatternW" + } + } + }, + "InsightFeedbackOption" : { + "type" : "string", + "enum" : [ "VALID_COLLECTION", "RECOMMENDATION_USEFUL", "ALERT_TOO_SENSITIVE", "DATA_NOISY_ANOMALY", "DATA_INCORRECT" ] + }, + "InsightHealth" : { + "type" : "structure", + "members" : { + "MeanTimeToRecoverInMilliseconds" : { + "shape" : "__long" + }, + "OpenProactiveInsights" : 
{ + "shape" : "__integer" + }, + "OpenReactiveInsights" : { + "shape" : "__integer" + } + } + }, + "InsightSeverity" : { + "type" : "string", + "enum" : [ "LOW", "MEDIUM", "HIGH" ] + }, + "InsightStatus" : { + "type" : "string", + "enum" : [ "ONGOING", "CLOSED" ] + }, + "InsightTimeRange" : { + "type" : "structure", + "members" : { + "EndTime" : { + "shape" : "__timestampUnix" + }, + "StartTime" : { + "shape" : "__timestampUnix" + } + }, + "required" : [ "StartTime" ] + }, + "InsightType" : { + "type" : "string", + "enum" : [ "REACTIVE", "PROACTIVE" ] + }, + "InternalServerException" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string" + } + }, + "required" : [ "Message" ], + "exception" : true, + "error" : { + "httpStatusCode" : 500 + } + }, + "ListAnomaliesForInsightRequest" : { + "type" : "structure", + "members" : { + "InsightId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "InsightId" + }, + "MaxResults" : { + "shape" : "__integerMin1Max500" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "StartTimeRange" : { + "shape" : "StartTimeRange" + } + }, + "required" : [ "InsightId" ] + }, + "ListAnomaliesForInsightResponse" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "ProactiveAnomalies" : { + "shape" : "__listOfProactiveAnomalySummary" + }, + "ReactiveAnomalies" : { + "shape" : "__listOfReactiveAnomalySummary" + } + } + }, + "ListEventsFilters" : { + "type" : "structure", + "members" : { + "DataSource" : { + "shape" : "EventDataSource" + }, + "EventClass" : { + "shape" : "EventClass" + }, + "EventSource" : { + "shape" : "__stringMin10Max50PatternAZAZ09AmazonawsComAwsEvents" + }, + "EventTimeRange" : { + "shape" : "EventTimeRange" + }, + "InsightId" : { + "shape" : "__stringMin1Max100PatternW" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + } + } + }, + "ListEventsRequest" : { + "type" : "structure", + "members" : { + "Filters" : { + "shape" : "ListEventsFilters" + }, + "MaxResults" : { + "shape" : "__integerMin1Max200" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + }, + "required" : [ "Filters" ] + }, + "ListEventsResponse" : { + "type" : "structure", + "members" : { + "Events" : { + "shape" : "__listOfEvent" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + } + }, + "ListInsightsAnyStatusFilter" : { + "type" : "structure", + "members" : { + "StartTimeRange" : { + "shape" : "StartTimeRange" + }, + "Type" : { + "shape" : "InsightType" + } + }, + "required" : [ "Type", "StartTimeRange" ] + }, + "ListInsightsClosedStatusFilter" : { + "type" : "structure", + "members" : { + "EndTimeRange" : { + "shape" : "EndTimeRange" + }, + "Type" : { + "shape" : "InsightType" + } + }, + "required" : [ "Type", "EndTimeRange" ] + }, + "ListInsightsOngoingStatusFilter" : { + "type" : "structure", + "members" : { + "Type" : { + "shape" : "InsightType" + } + }, + "required" : [ "Type" ] + }, + "ListInsightsRequest" : { + "type" : "structure", + "members" : { + "MaxResults" : { + "shape" : "__integerMin1Max100" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "StatusFilter" : { + "shape" : "ListInsightsStatusFilter" + } + }, + "required" : [ "StatusFilter" ] + }, + "ListInsightsResponse" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : 
"__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "ProactiveInsights" : { + "shape" : "__listOfProactiveInsightSummary" + }, + "ReactiveInsights" : { + "shape" : "__listOfReactiveInsightSummary" + } + } + }, + "ListInsightsStatusFilter" : { + "type" : "structure", + "members" : { + "Any" : { + "shape" : "ListInsightsAnyStatusFilter" + }, + "Closed" : { + "shape" : "ListInsightsClosedStatusFilter" + }, + "Ongoing" : { + "shape" : "ListInsightsOngoingStatusFilter" + } + } + }, + "ListNotificationChannelsRequest" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + } + }, + "ListNotificationChannelsResponse" : { + "type" : "structure", + "members" : { + "Channels" : { + "shape" : "__listOfNotificationChannel" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + } + }, + "ListRecommendationsRequest" : { + "type" : "structure", + "members" : { + "InsightId" : { + "shape" : "__stringMin1Max100PatternW" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + }, + "required" : [ "InsightId" ] + }, + "ListRecommendationsResponse" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "Recommendations" : { + "shape" : "__listOfRecommendation" + } + } + }, + "MaxResults" : { + "type" : "integer", + "min" : 1, + "max" : 25 + }, + "NotificationChannel" : { + "type" : "structure", + "members" : { + "Config" : { + "shape" : "NotificationChannelConfig" + }, + "Id" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + } + } + }, + "NotificationChannelConfig" : { + "type" : "structure", + "members" : { + "Sns" : { + "shape" : "SnsChannelConfig" + } + }, + "required" : [ "Sns" ] + }, + "OpsCenterIntegration" : { + "type" : "structure", + "members" : { + "OptInStatus" : { + "shape" : "OptInStatus" + } + } + }, + "OpsCenterIntegrationConfig" : { + "type" : "structure", + "members" : { + "OptInStatus" : { + "shape" : "OptInStatus" + } + } + }, + "OptInStatus" : { + "type" : "string", + "enum" : [ "ENABLED", "DISABLED" ] + }, + "PredictionTimeRange" : { + "type" : "structure", + "members" : { + "EndTime" : { + "shape" : "__timestampUnix" + }, + "StartTime" : { + "shape" : "__timestampUnix" + } + }, + "required" : [ "StartTime" ] + }, + "ProactiveAnomaly" : { + "type" : "structure", + "members" : { + "AnomalyTimeRange" : { + "shape" : "AnomalyTimeRange" + }, + "AssociatedInsightId" : { + "shape" : "__stringMin1Max100PatternW" + }, + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "Limit" : { + "shape" : "__double" + }, + "PredictionTimeRange" : { + "shape" : "PredictionTimeRange" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : "AnomalySeverity" + }, + "SourceDetails" : { + "shape" : "AnomalySourceDetails" + }, + "Status" : { + "shape" : "AnomalyStatus" + }, + "UpdateTime" : { + "shape" : "__timestampUnix" + } + } + }, + "ProactiveAnomalySummary" : { + "type" : "structure", + "members" : { + "AnomalyTimeRange" : { + "shape" : "AnomalyTimeRange" + }, + "AssociatedInsightId" : { + "shape" : "__stringMin1Max100PatternW" + }, + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "Limit" : { + "shape" : "__double" + }, + "PredictionTimeRange" : { + "shape" : "PredictionTimeRange" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : 
"AnomalySeverity" + }, + "SourceDetails" : { + "shape" : "AnomalySourceDetails" + }, + "Status" : { + "shape" : "AnomalyStatus" + }, + "UpdateTime" : { + "shape" : "__timestampUnix" + } + } + }, + "ProactiveInsight" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "InsightTimeRange" : { + "shape" : "InsightTimeRange" + }, + "Name" : { + "shape" : "__stringMin1Max530PatternSS" + }, + "PredictionTimeRange" : { + "shape" : "PredictionTimeRange" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : "InsightSeverity" + }, + "SsmOpsItemId" : { + "shape" : "__stringMin1Max100Pattern" + }, + "Status" : { + "shape" : "InsightStatus" + } + } + }, + "ProactiveInsightSummary" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "InsightTimeRange" : { + "shape" : "InsightTimeRange" + }, + "Name" : { + "shape" : "__stringMin1Max530PatternSS" + }, + "PredictionTimeRange" : { + "shape" : "PredictionTimeRange" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : "InsightSeverity" + }, + "Status" : { + "shape" : "InsightStatus" + } + } + }, + "PutFeedbackRequest" : { + "type" : "structure", + "members" : { + "InsightFeedback" : { + "shape" : "InsightFeedback" + } + } + }, + "PutFeedbackResponse" : { + "type" : "structure", + "members" : { } + }, + "ReactiveAnomaly" : { + "type" : "structure", + "members" : { + "AnomalyTimeRange" : { + "shape" : "AnomalyTimeRange" + }, + "AssociatedInsightId" : { + "shape" : "__stringMin1Max100PatternW" + }, + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : "AnomalySeverity" + }, + "SourceDetails" : { + "shape" : "AnomalySourceDetails" + }, + "Status" : { + "shape" : "AnomalyStatus" + } + } + }, + "ReactiveAnomalySummary" : { + "type" : "structure", + "members" : { + "AnomalyTimeRange" : { + "shape" : "AnomalyTimeRange" + }, + "AssociatedInsightId" : { + "shape" : "__stringMin1Max100PatternW" + }, + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : "AnomalySeverity" + }, + "SourceDetails" : { + "shape" : "AnomalySourceDetails" + }, + "Status" : { + "shape" : "AnomalyStatus" + } + } + }, + "ReactiveInsight" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "InsightTimeRange" : { + "shape" : "InsightTimeRange" + }, + "Name" : { + "shape" : "__stringMin1Max530PatternSS" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : "InsightSeverity" + }, + "SsmOpsItemId" : { + "shape" : "__stringMin1Max100Pattern" + }, + "Status" : { + "shape" : "InsightStatus" + } + } + }, + "ReactiveInsightSummary" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__stringMin1Max100PatternW" + }, + "InsightTimeRange" : { + "shape" : "InsightTimeRange" + }, + "Name" : { + "shape" : "__stringMin1Max530PatternSS" + }, + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severity" : { + "shape" : "InsightSeverity" + }, + "Status" : { + "shape" : "InsightStatus" + } + } + }, + "Recommendation" : { + "type" : "structure", + "members" : { + "Description" : { + "shape" : "__string" + }, + "Link" : { + "shape" : "__string" + }, + "Name" : { + "shape" : "__string" + }, + "Reason" : { + "shape" : 
"__string" + }, + "RelatedAnomalies" : { + "shape" : "__listOfRecommendationRelatedAnomaly" + }, + "RelatedEvents" : { + "shape" : "__listOfRecommendationRelatedEvent" + } + } + }, + "RecommendationRelatedAnomaly" : { + "type" : "structure", + "members" : { + "Resources" : { + "shape" : "__listOfRecommendationRelatedAnomalyResource" + }, + "SourceDetails" : { + "shape" : "__listOfRecommendationRelatedAnomalySourceDetail" + } + } + }, + "RecommendationRelatedAnomalyResource" : { + "type" : "structure", + "members" : { + "Name" : { + "shape" : "__string" + }, + "Type" : { + "shape" : "__string" + } + } + }, + "RecommendationRelatedAnomalySourceDetail" : { + "type" : "structure", + "members" : { + "CloudWatchMetrics" : { + "shape" : "__listOfRecommendationRelatedCloudWatchMetricsSourceDetail" + } + } + }, + "RecommendationRelatedCloudWatchMetricsSourceDetail" : { + "type" : "structure", + "members" : { + "MetricName" : { + "shape" : "__string" + }, + "Namespace" : { + "shape" : "__string" + } + } + }, + "RecommendationRelatedEvent" : { + "type" : "structure", + "members" : { + "Name" : { + "shape" : "__string" + }, + "Resources" : { + "shape" : "__listOfRecommendationRelatedEventResource" + } + } + }, + "RecommendationRelatedEventResource" : { + "type" : "structure", + "members" : { + "Name" : { + "shape" : "__string" + }, + "Type" : { + "shape" : "__string" + } + } + }, + "RemoveNotificationChannelRequest" : { + "type" : "structure", + "members" : { + "Id" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "Id" + } + }, + "required" : [ "Id" ] + }, + "RemoveNotificationChannelResponse" : { + "type" : "structure", + "members" : { } + }, + "ResourceCollection" : { + "type" : "structure", + "members" : { + "CloudFormation" : { + "shape" : "CloudFormationCollection" + } + } + }, + "ResourceCollectionFilter" : { + "type" : "structure", + "members" : { + "CloudFormation" : { + "shape" : "CloudFormationCollectionFilter" + } + } + }, + "ResourceNotFoundException" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string" + }, + "ResourceId" : { + "shape" : "__string" + }, + "ResourceType" : { + "shape" : "__string" + } + }, + "required" : [ "Message", "ResourceId", "ResourceType" ], + "exception" : true, + "error" : { + "httpStatusCode" : 404 + } + }, + "SearchInsightsFilters" : { + "type" : "structure", + "members" : { + "ResourceCollection" : { + "shape" : "ResourceCollection" + }, + "Severities" : { + "shape" : "__listOfInsightSeverity" + }, + "Statuses" : { + "shape" : "__listOfInsightStatus" + } + } + }, + "SearchInsightsRequest" : { + "type" : "structure", + "members" : { + "Filters" : { + "shape" : "SearchInsightsFilters" + }, + "MaxResults" : { + "shape" : "__integerMin1Max100" + }, + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "StartTimeRange" : { + "shape" : "StartTimeRange" + }, + "Type" : { + "shape" : "InsightType" + } + }, + "required" : [ "Type", "StartTimeRange" ] + }, + "SearchInsightsResponse" : { + "type" : "structure", + "members" : { + "NextToken" : { + "shape" : "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" + }, + "ProactiveInsights" : { + "shape" : "__listOfProactiveInsightSummary" + }, + "ReactiveInsights" : { + "shape" : "__listOfReactiveInsightSummary" + } + } + }, + "ServiceIntegrationConfig" : { + "type" : "structure", + "members" : { + "OpsCenter" : { + "shape" : "OpsCenterIntegration" + } + } + }, + "ServiceQuotaExceededException" : { + "type" : "structure", + "members" : { + 
"Message" : { + "shape" : "__string" + } + }, + "exception" : true, + "error" : { + "httpStatusCode" : 402 + } + }, + "SnsChannelConfig" : { + "type" : "structure", + "members" : { + "TopicArn" : { + "shape" : "__stringMin36Max1024PatternArnAwsAZ09SnsAZ09D12" + } + } + }, + "StartTimeRange" : { + "type" : "structure", + "members" : { + "FromTime" : { + "shape" : "__timestampUnix" + }, + "ToTime" : { + "shape" : "__timestampUnix" + } + } + }, + "ThrottlingException" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string" + }, + "QuotaCode" : { + "shape" : "__string" + }, + "ServiceCode" : { + "shape" : "__string" + } + }, + "required" : [ "Message" ], + "exception" : true, + "error" : { + "httpStatusCode" : 429 + } + }, + "UpdateCloudFormationCollectionFilter" : { + "type" : "structure", + "members" : { + "StackNames" : { + "shape" : "__listOf__stringMin1Max128PatternAZAZAZAZ09" + } + } + }, + "UpdateResourceCollectionAction" : { + "type" : "string", + "enum" : [ "ADD", "REMOVE" ] + }, + "UpdateResourceCollectionFilter" : { + "type" : "structure", + "members" : { + "CloudFormation" : { + "shape" : "UpdateCloudFormationCollectionFilter" + } + } + }, + "UpdateResourceCollectionRequest" : { + "type" : "structure", + "members" : { + "Action" : { + "shape" : "UpdateResourceCollectionAction" + }, + "ResourceCollection" : { + "shape" : "UpdateResourceCollectionFilter" + } + }, + "required" : [ "Action", "ResourceCollection" ] + }, + "UpdateResourceCollectionResponse" : { + "type" : "structure", + "members" : { } + }, + "UpdateServiceIntegrationConfig" : { + "type" : "structure", + "members" : { + "OpsCenter" : { + "shape" : "OpsCenterIntegrationConfig" + } + } + }, + "UpdateServiceIntegrationRequest" : { + "type" : "structure", + "members" : { + "ServiceIntegration" : { + "shape" : "UpdateServiceIntegrationConfig" + } + }, + "required" : [ "ServiceIntegration" ] + }, + "UpdateServiceIntegrationResponse" : { + "type" : "structure", + "members" : { } + }, + "ValidationException" : { + "type" : "structure", + "members" : { + "Fields" : { + "shape" : "__listOfValidationExceptionField" + }, + "Message" : { + "shape" : "__string" + }, + "Reason" : { + "shape" : "ValidationExceptionReason" + } + }, + "required" : [ "Message" ], + "exception" : true, + "error" : { + "httpStatusCode" : 400 + } + }, + "ValidationExceptionField" : { + "type" : "structure", + "members" : { + "Message" : { + "shape" : "__string" + }, + "Name" : { + "shape" : "__string" + } + }, + "required" : [ "Message", "Name" ] + }, + "ValidationExceptionReason" : { + "type" : "string", + "enum" : [ "UNKNOWN_OPERATION", "CANNOT_PARSE", "FIELD_VALIDATION_FAILED", "OTHER" ] + }, + "__boolean" : { + "type" : "boolean" + }, + "__double" : { + "type" : "double" + }, + "__integer" : { + "type" : "integer" + }, + "__integerMin1Max100" : { + "type" : "integer", + "min" : 1, + "max" : 100 + }, + "__integerMin1Max200" : { + "type" : "integer", + "min" : 1, + "max" : 200 + }, + "__integerMin1Max500" : { + "type" : "integer", + "min" : 1, + "max" : 500 + }, + "__listOfCloudFormationHealth" : { + "type" : "list", + "member" : { + "shape" : "CloudFormationHealth" + } + }, + "__listOfCloudWatchMetricsDetail" : { + "type" : "list", + "member" : { + "shape" : "CloudWatchMetricsDetail" + } + }, + "__listOfCloudWatchMetricsDimension" : { + "type" : "list", + "member" : { + "shape" : "CloudWatchMetricsDimension" + } + }, + "__listOfEvent" : { + "type" : "list", + "member" : { + "shape" : "Event" + } + }, + "__listOfEventResource" : { 
+ "type" : "list", + "member" : { + "shape" : "EventResource" + } + }, + "__listOfInsightSeverity" : { + "type" : "list", + "member" : { + "shape" : "InsightSeverity" + } + }, + "__listOfInsightStatus" : { + "type" : "list", + "member" : { + "shape" : "InsightStatus" + } + }, + "__listOfNotificationChannel" : { + "type" : "list", + "member" : { + "shape" : "NotificationChannel" + } + }, + "__listOfProactiveAnomalySummary" : { + "type" : "list", + "member" : { + "shape" : "ProactiveAnomalySummary" + } + }, + "__listOfProactiveInsightSummary" : { + "type" : "list", + "member" : { + "shape" : "ProactiveInsightSummary" + } + }, + "__listOfReactiveAnomalySummary" : { + "type" : "list", + "member" : { + "shape" : "ReactiveAnomalySummary" + } + }, + "__listOfReactiveInsightSummary" : { + "type" : "list", + "member" : { + "shape" : "ReactiveInsightSummary" + } + }, + "__listOfRecommendation" : { + "type" : "list", + "member" : { + "shape" : "Recommendation" + } + }, + "__listOfRecommendationRelatedAnomaly" : { + "type" : "list", + "member" : { + "shape" : "RecommendationRelatedAnomaly" + } + }, + "__listOfRecommendationRelatedAnomalyResource" : { + "type" : "list", + "member" : { + "shape" : "RecommendationRelatedAnomalyResource" + } + }, + "__listOfRecommendationRelatedAnomalySourceDetail" : { + "type" : "list", + "member" : { + "shape" : "RecommendationRelatedAnomalySourceDetail" + } + }, + "__listOfRecommendationRelatedCloudWatchMetricsSourceDetail" : { + "type" : "list", + "member" : { + "shape" : "RecommendationRelatedCloudWatchMetricsSourceDetail" + } + }, + "__listOfRecommendationRelatedEvent" : { + "type" : "list", + "member" : { + "shape" : "RecommendationRelatedEvent" + } + }, + "__listOfRecommendationRelatedEventResource" : { + "type" : "list", + "member" : { + "shape" : "RecommendationRelatedEventResource" + } + }, + "__listOfValidationExceptionField" : { + "type" : "list", + "member" : { + "shape" : "ValidationExceptionField" + } + }, + "__listOf__stringMin1Max128PatternAZAZAZAZ09" : { + "type" : "list", + "member" : { + "shape" : "__stringMin1Max128PatternAZAZAZAZ09" + } + }, + "__long" : { + "type" : "long" + }, + "__string" : { + "type" : "string" + }, + "__stringMin0Max2048Pattern" : { + "type" : "string", + "min" : 0, + "max" : 2048, + "pattern" : "^.*$" + }, + "__stringMin0Max50" : { + "type" : "string", + "min" : 0, + "max" : 50 + }, + "__stringMin10Max50PatternAZAZ09AmazonawsComAwsEvents" : { + "type" : "string", + "min" : 10, + "max" : 50, + "pattern" : "^[a-z]+[a-z0-9]*\\.amazonaws\\.com|aws\\.events$" + }, + "__stringMin1Max100Pattern" : { + "type" : "string", + "min" : 1, + "max" : 100, + "pattern" : "^.*$" + }, + "__stringMin1Max100PatternW" : { + "type" : "string", + "min" : 1, + "max" : 100, + "pattern" : "^[\\w-]*$" + }, + "__stringMin1Max128PatternAZAZAZAZ09" : { + "type" : "string", + "min" : 1, + "max" : 128, + "pattern" : "^[a-zA-Z*]+[a-zA-Z0-9-]*$" + }, + "__stringMin1Max530PatternSS" : { + "type" : "string", + "min" : 1, + "max" : 530, + "pattern" : "^[\\s\\S]*$" + }, + "__stringMin36Max1024PatternArnAwsAZ09SnsAZ09D12" : { + "type" : "string", + "min" : 36, + "max" : 1024, + "pattern" : "^arn:aws[a-z0-9-]*:sns:[a-z0-9-]+:\\d{12}:[^:]+$" + }, + "__stringMin36Max2048PatternArnAwsAZAZ09AZ09D12" : { + "type" : "string", + "min" : 36, + "max" : 2048, + "pattern" : "^arn:aws[-a-z]*:[a-z0-9-]*:[a-z0-9-]*:\\d{12}:.*$" + }, + "__stringMin36Max36PatternAF098AF094AF094AF094AF0912" : { + "type" : "string", + "min" : 36, + "max" : 36, + "pattern" : 
"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "__timestampIso8601" : { + "type" : "timestamp", + "timestampFormat" : "iso8601" + }, + "__timestampUnix" : { + "type" : "timestamp", + "timestampFormat" : "unixTimestamp" + } + } +} \ No newline at end of file diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 5801a9824310..cf17bcba2c45 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directory/pom.xml b/services/directory/pom.xml index b0ba261f860a..c8e3254fbb7a 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/directory/src/main/resources/codegen-resources/service-2.json b/services/directory/src/main/resources/codegen-resources/service-2.json index 6c4d33a016af..4cf712d980f8 100644 --- a/services/directory/src/main/resources/codegen-resources/service-2.json +++ b/services/directory/src/main/resources/codegen-resources/service-2.json @@ -49,6 +49,28 @@ ], "documentation":"

If the DNS server for your on-premises domain uses a publicly addressable IP address, you must add a CIDR address block to correctly route traffic to and from your Microsoft AD on Amazon Web Services. AddIpRoutes adds this address block. You can also use AddIpRoutes to facilitate routing traffic that uses public IP ranges from your Microsoft AD on AWS to a peer VPC.

Before you call AddIpRoutes, ensure that all of the required permissions have been explicitly granted through a policy. For details about what permissions are required to run the AddIpRoutes operation, see AWS Directory Service API Permissions: Actions, Resources, and Conditions Reference.

" }, + "AddRegion":{ + "name":"AddRegion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddRegionRequest"}, + "output":{"shape":"AddRegionResult"}, + "errors":[ + {"shape":"DirectoryUnavailableException"}, + {"shape":"InvalidParameterException"}, + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryAlreadyInRegionException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"DirectoryDoesNotExistException"}, + {"shape":"RegionLimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Adds two domain controllers in the specified Region for the specified directory.

" + }, "AddTagsToResource":{ "name":"AddTagsToResource", "http":{ @@ -132,7 +154,7 @@ {"shape":"ClientException"}, {"shape":"ServiceException"} ], - "documentation":"

Creates a computer account in the specified directory, and joins the computer to the directory.

" + "documentation":"

Creates an Active Directory computer object in the specified directory.

" }, "CreateConditionalForwarder":{ "name":"CreateConditionalForwarder", @@ -462,6 +484,25 @@ ], "documentation":"

Describes the status of LDAP security for the specified directory.

" }, + "DescribeRegions":{ + "name":"DescribeRegions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegionsRequest"}, + "output":{"shape":"DescribeRegionsResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DirectoryDoesNotExistException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Provides information about the Regions that are configured for multi-Region replication.

" + }, "DescribeSharedDirectories":{ "name":"DescribeSharedDirectories", "http":{ @@ -515,6 +556,24 @@ ], "documentation":"

Obtains information about the trust relationships for this account.

If no input parameters are provided, such as DirectoryId or TrustIds, this request describes all the trust relationships belonging to the account.

" }, + "DisableClientAuthentication":{ + "name":"DisableClientAuthentication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableClientAuthenticationRequest"}, + "output":{"shape":"DisableClientAuthenticationResult"}, + "errors":[ + {"shape":"DirectoryDoesNotExistException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidClientAuthStatusException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Disables client authentication for smart cards.

" + }, "DisableLDAPS":{ "name":"DisableLDAPS", "http":{ @@ -566,6 +625,25 @@ ], "documentation":"

Disables single sign-on for a directory.

" }, + "EnableClientAuthentication":{ + "name":"EnableClientAuthentication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableClientAuthenticationRequest"}, + "output":{"shape":"EnableClientAuthenticationResult"}, + "errors":[ + {"shape":"DirectoryDoesNotExistException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidClientAuthStatusException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoAvailableCertificateException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Enables client authentication for smart cards.

" + }, "EnableLDAPS":{ "name":"EnableLDAPS", "http":{ @@ -805,6 +883,24 @@ ], "documentation":"

Removes IP address blocks from a directory.

" }, + "RemoveRegion":{ + "name":"RemoveRegion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveRegionRequest"}, + "output":{"shape":"RemoveRegionResult"}, + "errors":[ + {"shape":"DirectoryUnavailableException"}, + {"shape":"DirectoryDoesNotExistException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Stops all replication and removes the domain controllers from the specified Region. You cannot remove the primary Region with this operation. Instead, use the DeleteDirectory API.

" + }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", "http":{ @@ -1060,6 +1156,30 @@ "members":{ } }, + "AddRegionRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "RegionName", + "VPCSettings" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory to which you want to add Region replication.

" + }, + "RegionName":{ + "shape":"RegionName", + "documentation":"

The name of the Region where you want to add domain controllers for replication. For example, us-east-1.

" + }, + "VPCSettings":{"shape":"DirectoryVpcSettings"} + } + }, + "AddRegionResult":{ + "type":"structure", + "members":{ + } + }, "AddTagsToResourceRequest":{ "type":"structure", "required":[ @@ -1083,6 +1203,10 @@ } }, "AddedDateTime":{"type":"timestamp"}, + "AdditionalRegions":{ + "type":"list", + "member":{"shape":"RegionName"} + }, "AliasName":{ "type":"string", "max":62, @@ -1180,6 +1304,14 @@ "ExpiryDateTime":{ "shape":"CertificateExpiryDateTime", "documentation":"

The date and time when the certificate will expire.

" + }, + "Type":{ + "shape":"CertificateType", + "documentation":"

Select ClientCertAuth for smart card integration.

" + }, + "ClientCertAuthSettings":{ + "shape":"ClientCertAuthSettings", + "documentation":"

Provides information about the client certificate authentication settings. The default value is ClientLDAPS.

" } }, "documentation":"

Information about the certificate.

" @@ -1240,6 +1372,10 @@ "ExpiryDateTime":{ "shape":"CertificateExpiryDateTime", "documentation":"

The date and time when the certificate will expire.

" + }, + "Type":{ + "shape":"CertificateType", + "documentation":"

Displays the type of certificate.

" } }, "documentation":"

Contains general information about a certificate.

" @@ -1266,6 +1402,13 @@ ] }, "CertificateStateReason":{"type":"string"}, + "CertificateType":{ + "type":"string", + "enum":[ + "ClientCertAuth", + "ClientLDAPS" + ] + }, "CertificatesInfo":{ "type":"list", "member":{"shape":"CertificateInfo"} @@ -1278,6 +1421,20 @@ "type":"list", "member":{"shape":"CidrIp"} }, + "ClientAuthenticationType":{ + "type":"string", + "enum":["SmartCard"] + }, + "ClientCertAuthSettings":{ + "type":"structure", + "members":{ + "OCSPUrl":{ + "shape":"OCSPUrl", + "documentation":"

Specifies the URL of the default OCSP server used to check for revocation status.

" + } + }, + "documentation":"

Contains information about the client certificate authentication settings, such as ClientLDAPS or ClientCertAuth.

" + }, "ClientException":{ "type":"structure", "members":{ @@ -1517,7 +1674,7 @@ }, "Password":{ "shape":"Password", - "documentation":"

The password for the directory administrator. The directory creation process creates a directory administrator account with the user name Administrator and this password.

If you need to change the password for the administrator account, you can use the ResetUserPassword API call.

" + "documentation":"

The password for the directory administrator. The directory creation process creates a directory administrator account with the user name Administrator and this password.

If you need to change the password for the administrator account, you can use the ResetUserPassword API call.

The regex pattern for this string is made up of the following conditions:

  • Length (?=^.{8,64}$) – Must be between 8 and 64 characters

AND any 3 of the following password complexity rules required by Active Directory:

  • Numbers and upper case and lowercase (?=.*\\d)(?=.*[A-Z])(?=.*[a-z])

  • Numbers and special characters and lower case (?=.*\\d)(?=.*[^A-Za-z0-9\\s])(?=.*[a-z])

  • Special characters and upper case and lower case (?=.*[^A-Za-z0-9\\s])(?=.*[A-Z])(?=.*[a-z])

  • Numbers and upper case and special characters (?=.*\\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\\s])

For additional information about how Active Directory passwords are enforced, see Password must meet complexity requirements on the Microsoft website.

" }, "Description":{ "shape":"Description", @@ -2036,6 +2193,37 @@ } } }, + "DescribeRegionsRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory.

" + }, + "RegionName":{ + "shape":"RegionName", + "documentation":"

The name of the Region. For example, us-east-1.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The DescribeRegionsResult.NextToken value from a previous call to DescribeRegions. Pass null if this is the first call.

" + } + } + }, + "DescribeRegionsResult":{ + "type":"structure", + "members":{ + "RegionsDescription":{ + "shape":"RegionsDescription", + "documentation":"

List of Region information related to the directory for each replicated Region.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeRegions to retrieve the next set of items.

" + } + } + }, "DescribeSharedDirectoriesRequest":{ "type":"structure", "required":["OwnerDirectoryId"], @@ -2153,6 +2341,15 @@ "type":"integer", "min":2 }, + "DirectoryAlreadyInRegionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The Region you specified is the same Region where the AWS Managed Microsoft AD directory was created. Specify a different Region and try again.

", + "exception":true + }, "DirectoryAlreadySharedException":{ "type":"structure", "members":{ @@ -2318,6 +2515,10 @@ "OwnerDirectoryDescription":{ "shape":"OwnerDirectoryDescription", "documentation":"

Describes the AWS Managed Microsoft AD directory in the directory owner account.

" + }, + "RegionsInfo":{ + "shape":"RegionsInfo", + "documentation":"

Lists the Regions where the directory has been replicated.

" } }, "documentation":"

Contains information about an AWS Directory Service directory.

" @@ -2418,7 +2619,7 @@ }, "DirectoryShortName":{ "type":"string", - "pattern":"^[^\\\\/:*?\\\"\\<\\>|.]+[^\\\\/:*?\\\"<>|]*$" + "pattern":"^[^\\\\/:*?\"<>|.]+[^\\\\/:*?\"<>|]*$" }, "DirectorySize":{ "type":"string", @@ -2501,6 +2702,28 @@ }, "documentation":"

Contains information about the directory.

" }, + "DisableClientAuthenticationRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "Type" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory for which to disable smart card client authentication.

" + }, + "Type":{ + "shape":"ClientAuthenticationType", + "documentation":"

The type of client authentication to disable.

" + } + } + }, + "DisableClientAuthenticationResult":{ + "type":"structure", + "members":{ + } + }, "DisableLDAPSRequest":{ "type":"structure", "required":[ @@ -2649,6 +2872,28 @@ "type":"list", "member":{"shape":"DomainController"} }, + "EnableClientAuthenticationRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "Type" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory for which to enable smart card client authentication.

" + }, + "Type":{ + "shape":"ClientAuthenticationType", + "documentation":"

The type of client authentication to enable.

" + } + } + }, + "EnableClientAuthenticationResult":{ + "type":"structure", + "members":{ + } + }, "EnableLDAPSRequest":{ "type":"structure", "required":[ @@ -2828,6 +3073,15 @@ "documentation":"

The certificate PEM that was provided has incorrect encoding.

", "exception":true }, + "InvalidClientAuthStatusException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The client authorization was invalid.

", + "exception":true + }, "InvalidLDAPSStatusException":{ "type":"structure", "members":{ @@ -3199,6 +3453,12 @@ "max":1024, "sensitive":true }, + "OCSPUrl":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^(https?|ftp|file|ldaps?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;()]*[-a-zA-Z0-9+&@#/%=~_|()]" + }, "OrganizationalUnitDN":{ "type":"string", "max":2000, @@ -3282,7 +3542,7 @@ "members":{ "RadiusServers":{ "shape":"Servers", - "documentation":"

An array of strings that contains the IP addresses of the RADIUS server endpoints, or the IP addresses of your RADIUS server load balancer.

" + "documentation":"

An array of strings that contains the fully qualified domain name (FQDN) or IP addresses of the RADIUS server endpoints, or the FQDN or IP addresses of your RADIUS server load balancer.

" }, "RadiusPort":{ "shape":"PortNumber", @@ -3334,6 +3594,84 @@ "max":20, "min":1 }, + "RegionDescription":{ + "type":"structure", + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory.

" + }, + "RegionName":{ + "shape":"RegionName", + "documentation":"

The name of the Region. For example, us-east-1.

" + }, + "RegionType":{ + "shape":"RegionType", + "documentation":"

Specifies whether the Region is the primary Region or an additional Region.

" + }, + "Status":{ + "shape":"DirectoryStage", + "documentation":"

The status of the replication process for the specified Region.

" + }, + "VpcSettings":{"shape":"DirectoryVpcSettings"}, + "DesiredNumberOfDomainControllers":{ + "shape":"DesiredNumberOfDomainControllers", + "documentation":"

The desired number of domain controllers in the specified Region for the specified directory.

" + }, + "LaunchTime":{ + "shape":"LaunchTime", + "documentation":"

Specifies when the Region replication began.

" + }, + "StatusLastUpdatedDateTime":{ + "shape":"StateLastUpdatedDateTime", + "documentation":"

The date and time that the Region status was last updated.

" + }, + "LastUpdatedDateTime":{ + "shape":"LastUpdatedDateTime", + "documentation":"

The date and time that the Region description was last updated.

" + } + }, + "documentation":"

The replicated Region information for a directory.

" + }, + "RegionLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

You have reached the limit for the maximum number of simultaneous Region replications per directory.

", + "exception":true + }, + "RegionName":{ + "type":"string", + "max":32, + "min":8 + }, + "RegionType":{ + "type":"string", + "enum":[ + "Primary", + "Additional" + ] + }, + "RegionsDescription":{ + "type":"list", + "member":{"shape":"RegionDescription"} + }, + "RegionsInfo":{ + "type":"structure", + "members":{ + "PrimaryRegion":{ + "shape":"RegionName", + "documentation":"

The Region where the AWS Managed Microsoft AD directory was originally created.

" + }, + "AdditionalRegions":{ + "shape":"AdditionalRegions", + "documentation":"

Lists the Regions where the directory has been replicated, excluding the primary Region.

" + } + }, + "documentation":"

Provides information about the Regions that are configured for multi-Region replication.

" + }, "RegisterCertificateRequest":{ "type":"structure", "required":[ @@ -3348,7 +3686,12 @@ "CertificateData":{ "shape":"CertificateData", "documentation":"

The certificate PEM string that needs to be registered.

" - } + }, + "Type":{ + "shape":"CertificateType", + "documentation":"

The certificate type to register for the request.

" + }, + "ClientCertAuthSettings":{"shape":"ClientCertAuthSettings"} } }, "RegisterCertificateResult":{ @@ -3433,6 +3776,21 @@ "members":{ } }, + "RemoveRegionRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The identifier of the directory for which you want to remove Region replication.

" + } + } + }, + "RemoveRegionResult":{ + "type":"structure", + "members":{ + } + }, "RemoveTagsFromResourceRequest":{ "type":"structure", "required":[ diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index ad22b9aa9ef8..332dcfe5ed06 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/dlm/src/main/resources/codegen-resources/service-2.json b/services/dlm/src/main/resources/codegen-resources/service-2.json index c6bece93454e..0ce73d55690e 100644 --- a/services/dlm/src/main/resources/codegen-resources/service-2.json +++ b/services/dlm/src/main/resources/codegen-resources/service-2.json @@ -503,6 +503,10 @@ "Tags":{ "shape":"TagMap", "documentation":"

The tags.

" + }, + "PolicyType":{ + "shape":"PolicyTypeValues", + "documentation":"

The type of policy. EBS_SNAPSHOT_MANAGEMENT indicates that the policy manages the lifecycle of Amazon EBS snapshots. IMAGE_MANAGEMENT indicates that the policy manages the lifecycle of EBS-backed AMIs.

" } }, "documentation":"

Summary information about a lifecycle policy.

" @@ -546,6 +550,7 @@ } } }, + "NoReboot":{"type":"boolean"}, "Parameter":{"type":"string"}, "ParameterList":{ "type":"list", @@ -557,6 +562,10 @@ "ExcludeBootVolume":{ "shape":"ExcludeBootVolume", "documentation":"

[EBS Snapshot Management – Instance policies only] Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is false.

" + }, + "NoReboot":{ + "shape":"NoReboot", + "documentation":"

Applies to AMI lifecycle policies only. Indicates whether targeted instances are rebooted when the lifecycle policy runs. true indicates that targeted instances are not rebooted when the policy runs. false indicates that targeted instances are rebooted when the policy runs. The default is true (instances are not rebooted).

" } }, "documentation":"

Specifies optional parameters to add to a policy. The set of valid parameters depends on the combination of policy type and resource type.

" @@ -578,7 +587,7 @@ "members":{ "PolicyType":{ "shape":"PolicyTypeValues", - "documentation":"

The valid target resource types and actions a policy can manage. The default is EBS_SNAPSHOT_MANAGEMENT.

" + "documentation":"

The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. The default is EBS_SNAPSHOT_MANAGEMENT.

" }, "ResourceTypes":{ "shape":"ResourceTypeValuesList", @@ -611,7 +620,10 @@ }, "PolicyTypeValues":{ "type":"string", - "enum":["EBS_SNAPSHOT_MANAGEMENT"] + "enum":[ + "EBS_SNAPSHOT_MANAGEMENT", + "IMAGE_MANAGEMENT" + ] }, "ResourceNotFoundException":{ "type":"structure", diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index a0bd679497e4..082cd1059e98 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdb/src/main/resources/codegen-resources/service-2.json b/services/docdb/src/main/resources/codegen-resources/service-2.json index 206083a497d3..0b2e77f61677 100644 --- a/services/docdb/src/main/resources/codegen-resources/service-2.json +++ b/services/docdb/src/main/resources/codegen-resources/service-2.json @@ -1107,7 +1107,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier for an encrypted cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If an encryption key is not specified in KmsKeyId:

  • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon DocumentDB uses the encryption key that is used to encrypt the source. Otherwise, Amazon DocumentDB uses your default encryption key.

  • If the StorageEncrypted parameter is true and ReplicationSourceIdentifier is not specified, Amazon DocumentDB uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a replica of an encrypted cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the replica in that AWS Region.

" + "documentation":"

The AWS KMS key identifier for an encrypted cluster.

The AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.

If an encryption key is not specified in KmsKeyId:

  • If the StorageEncrypted parameter is true, Amazon DocumentDB uses your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

" }, "PreSignedUrl":{ "shape":"String", diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index 11d11eb800f3..f4517b180827 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/paginators-1.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/paginators-1.json index ab2d5c8c08f4..69fadc92a5e4 100755 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/paginators-1.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/paginators-1.json @@ -9,6 +9,11 @@ "limit_key": "MaxResults", "output_token": "NextToken" }, + "ListExports": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListTables": { "input_token": "ExclusiveStartTableName", "limit_key": "Limit", diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json index 7fcd32ff28e8..4fcc27691301 100755 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json @@ -13,6 +13,20 @@ "uid":"dynamodb-2012-08-10" }, "operations":{ + "BatchExecuteStatement":{ + "name":"BatchExecuteStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchExecuteStatementInput"}, + "output":{"shape":"BatchExecuteStatementOutput"}, + "errors":[ + {"shape":"RequestLimitExceeded"}, + {"shape":"InternalServerError"} + ], + "documentation":"

This operation allows you to perform batch reads and writes on data stored in DynamoDB, using PartiQL.

" + }, "BatchGetItem":{ "name":"BatchGetItem", "http":{ @@ -219,6 +233,21 @@ "documentation":"

Returns the regional endpoint information.

", "endpointoperation":true }, + "DescribeExport":{ + "name":"DescribeExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportInput"}, + "output":{"shape":"DescribeExportOutput"}, + "errors":[ + {"shape":"ExportNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Describes an existing table export.

" + }, "DescribeGlobalTable":{ "name":"DescribeGlobalTable", "http":{ @@ -251,6 +280,22 @@ "endpointdiscovery":{ } }, + "DescribeKinesisStreamingDestination":{ + "name":"DescribeKinesisStreamingDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeKinesisStreamingDestinationInput"}, + "output":{"shape":"DescribeKinesisStreamingDestinationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns information about the status of Kinesis streaming.

", + "endpointdiscovery":{ + } + }, "DescribeLimits":{ "name":"DescribeLimits", "http":{ @@ -312,6 +357,99 @@ "endpointdiscovery":{ } }, + "DisableKinesisStreamingDestination":{ + "name":"DisableKinesisStreamingDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"KinesisStreamingDestinationInput"}, + "output":{"shape":"KinesisStreamingDestinationOutput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Stops replication from the DynamoDB table to the Kinesis data stream. This is done without deleting either of the resources.

", + "endpointdiscovery":{ + } + }, + "EnableKinesisStreamingDestination":{ + "name":"EnableKinesisStreamingDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"KinesisStreamingDestinationInput"}, + "output":{"shape":"KinesisStreamingDestinationOutput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Starts table data replication to the specified Kinesis data stream at a timestamp chosen during the enable workflow. If this operation doesn't return results immediately, use DescribeKinesisStreamingDestination to check if streaming to the Kinesis data stream is ACTIVE.

", + "endpointdiscovery":{ + } + }, + "ExecuteStatement":{ + "name":"ExecuteStatement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteStatementInput"}, + "output":{"shape":"ExecuteStatementOutput"}, + "errors":[ + {"shape":"ConditionalCheckFailedException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ItemCollectionSizeLimitExceededException"}, + {"shape":"TransactionConflictException"}, + {"shape":"RequestLimitExceeded"}, + {"shape":"InternalServerError"}, + {"shape":"DuplicateItemException"} + ], + "documentation":"

This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL.

" + }, + "ExecuteTransaction":{ + "name":"ExecuteTransaction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteTransactionInput"}, + "output":{"shape":"ExecuteTransactionOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TransactionCanceledException"}, + {"shape":"TransactionInProgressException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"RequestLimitExceeded"}, + {"shape":"InternalServerError"} + ], + "documentation":"

This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL.

" + }, + "ExportTableToPointInTime":{ + "name":"ExportTableToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportTableToPointInTimeInput"}, + "output":{"shape":"ExportTableToPointInTimeOutput"}, + "errors":[ + {"shape":"TableNotFoundException"}, + {"shape":"PointInTimeRecoveryUnavailableException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidExportTimeException"}, + {"shape":"ExportConflictException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Exports table data to an S3 bucket. The table must have point in time recovery enabled, and you can export data from any time within the point in time recovery window.

" + }, "GetItem":{ "name":"GetItem", "http":{ @@ -359,6 +497,20 @@ ], "documentation":"

Returns a list of ContributorInsightsSummary for a table and all its global secondary indexes.

" }, + "ListExports":{ + "name":"ListExports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListExportsInput"}, + "output":{"shape":"ListExportsOutput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Lists completed exports within the past 90 days.

" + }, "ListGlobalTables":{ "name":"ListGlobalTables", "http":{ @@ -1169,6 +1321,25 @@ "max":100, "min":1 }, + "BatchExecuteStatementInput":{ + "type":"structure", + "required":["Statements"], + "members":{ + "Statements":{ + "shape":"PartiQLBatchRequest", + "documentation":"

The list of PartiQL statements representing the batch to run.

" + } + } + }, + "BatchExecuteStatementOutput":{ + "type":"structure", + "members":{ + "Responses":{ + "shape":"PartiQLBatchResponse", + "documentation":"

The response to each PartiQL statement in the batch.

" + } + } + }, "BatchGetItemInput":{ "type":"structure", "required":["RequestItems"], @@ -1211,6 +1382,73 @@ "key":{"shape":"TableName"}, "value":{"shape":"ItemList"} }, + "BatchStatementError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"BatchStatementErrorCodeEnum", + "documentation":"

The error code associated with the failed PartiQL batch statement.

" + }, + "Message":{ + "shape":"String", + "documentation":"

The error message associated with the PartiQL batch response.

" + } + }, + "documentation":"

An error associated with a statement in a PartiQL batch that was run.

" + }, + "BatchStatementErrorCodeEnum":{ + "type":"string", + "enum":[ + "ConditionalCheckFailed", + "ItemCollectionSizeLimitExceeded", + "RequestLimitExceeded", + "ValidationError", + "ProvisionedThroughputExceeded", + "TransactionConflict", + "ThrottlingError", + "InternalServerError", + "ResourceNotFound", + "AccessDenied", + "DuplicateItem" + ] + }, + "BatchStatementRequest":{ + "type":"structure", + "required":["Statement"], + "members":{ + "Statement":{ + "shape":"PartiQLStatement", + "documentation":"

A valid PartiQL statement.

" + }, + "Parameters":{ + "shape":"PreparedStatementParameters", + "documentation":"

The parameters associated with a PartiQL statement in the batch request.

" + }, + "ConsistentRead":{ + "shape":"ConsistentRead", + "documentation":"

The read consistency of the PartiQL batch request.

" + } + }, + "documentation":"

A PartiQL batch statement request.

" + }, + "BatchStatementResponse":{ + "type":"structure", + "members":{ + "Error":{ + "shape":"BatchStatementError", + "documentation":"

The error associated with a failed PartiQL batch statement.

" + }, + "TableName":{ + "shape":"TableName", + "documentation":"

The table name associated with a failed PartiQL batch statement.

" + }, + "Item":{ + "shape":"AttributeMap", + "documentation":"

A DynamoDB item associated with a BatchStatementResponse.

" + } + }, + "documentation":"

A PartiQL batch statement response.

" + }, "BatchWriteItemInput":{ "type":"structure", "required":["RequestItems"], @@ -1252,6 +1490,10 @@ "max":25, "min":1 }, + "BilledSizeBytes":{ + "type":"long", + "min":0 + }, "BillingMode":{ "type":"string", "enum":[ @@ -1327,6 +1569,7 @@ "max":36, "min":1 }, + "ClientToken":{"type":"string"}, "Code":{"type":"string"}, "ComparisonOperator":{ "type":"string", @@ -1530,7 +1773,7 @@ "documentation":"

Describes the current status for contributor insights for the given table and index, if applicable.

" } }, - "documentation":"

Represents a Contributor Insights summary entry..

" + "documentation":"

Represents a Contributor Insights summary entry.

" }, "CreateBackupInput":{ "type":"structure", @@ -1986,6 +2229,25 @@ } } }, + "DescribeExportInput":{ + "type":"structure", + "required":["ExportArn"], + "members":{ + "ExportArn":{ + "shape":"ExportArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the export.

" + } + } + }, + "DescribeExportOutput":{ + "type":"structure", + "members":{ + "ExportDescription":{ + "shape":"ExportDescription", + "documentation":"

Represents the properties of the export.

" + } + } + }, "DescribeGlobalTableInput":{ "type":"structure", "required":["GlobalTableName"], @@ -2028,6 +2290,29 @@ } } }, + "DescribeKinesisStreamingDestinationInput":{ + "type":"structure", + "required":["TableName"], + "members":{ + "TableName":{ + "shape":"TableName", + "documentation":"

The name of the table being described.

" + } + } + }, + "DescribeKinesisStreamingDestinationOutput":{ + "type":"structure", + "members":{ + "TableName":{ + "shape":"TableName", + "documentation":"

The name of the table being described.

" + }, + "KinesisDataStreamDestinations":{ + "shape":"KinesisDataStreamDestinations", + "documentation":"

The list of replica structures for the table being described.

" + } + } + }, "DescribeLimitsInput":{ "type":"structure", "members":{ @@ -2115,7 +2400,25 @@ } } }, + "DestinationStatus":{ + "type":"string", + "enum":[ + "ENABLING", + "ACTIVE", + "DISABLING", + "DISABLED", + "ENABLE_FAILED" + ] + }, "Double":{"type":"double"}, + "DuplicateItemException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

There was an attempt to insert an item with the same primary key as an item that already exists in the DynamoDB table.

", + "exception":true + }, "Endpoint":{ "type":"structure", "required":[ @@ -2141,6 +2444,65 @@ "ErrorMessage":{"type":"string"}, "ExceptionDescription":{"type":"string"}, "ExceptionName":{"type":"string"}, + "ExecuteStatementInput":{ + "type":"structure", + "required":["Statement"], + "members":{ + "Statement":{ + "shape":"PartiQLStatement", + "documentation":"

The PartiQL statement representing the operation to run.

" + }, + "Parameters":{ + "shape":"PreparedStatementParameters", + "documentation":"

The parameters for the PartiQL statement, if any.

" + }, + "ConsistentRead":{ + "shape":"ConsistentRead", + "documentation":"

The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.

" + }, + "NextToken":{ + "shape":"PartiQLNextToken", + "documentation":"

Set this value to get remaining results, if NextToken was returned in the statement response.

" + } + } + }, + "ExecuteStatementOutput":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ItemList", + "documentation":"

If a read operation was used, this property will contain the result of the read operation: a map of attribute names and their values. For write operations, this value will be empty.

" + }, + "NextToken":{ + "shape":"PartiQLNextToken", + "documentation":"

If the response of a read request exceeds the response payload limit, DynamoDB will set this value in the response. If set, you can use this value in a subsequent request to get the remaining results.

" + } + } + }, + "ExecuteTransactionInput":{ + "type":"structure", + "required":["TransactStatements"], + "members":{ + "TransactStatements":{ + "shape":"ParameterizedStatements", + "documentation":"

The list of PartiQL statements representing the transaction to run.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

Providing a ClientRequestToken makes the call to ExecuteTransaction idempotent, meaning that multiple identical calls have the same effect as one single call.

", + "idempotencyToken":true + } + } + }, + "ExecuteTransactionOutput":{ + "type":"structure", + "members":{ + "Responses":{ + "shape":"ItemResponseList", + "documentation":"

The response to a PartiQL transaction.

" + } + } + }, "ExpectedAttributeMap":{ "type":"map", "key":{"shape":"AttributeName"}, @@ -2168,6 +2530,202 @@ }, "documentation":"

Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem, PutItem, or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:

  • Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.

  • Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.

Value and Exists are incompatible with AttributeValueList and ComparisonOperator. Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.

" }, + "ExportArn":{ + "type":"string", + "max":1024, + "min":37 + }, + "ExportConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

There was a conflict when writing to the specified S3 bucket.

", + "exception":true + }, + "ExportDescription":{ + "type":"structure", + "members":{ + "ExportArn":{ + "shape":"ExportArn", + "documentation":"

The Amazon Resource Name (ARN) of the table export.

" + }, + "ExportStatus":{ + "shape":"ExportStatus", + "documentation":"

Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.

" + }, + "StartTime":{ + "shape":"ExportStartTime", + "documentation":"

The time at which the export task began.

" + }, + "EndTime":{ + "shape":"ExportEndTime", + "documentation":"

The time at which the export task completed.

" + }, + "ExportManifest":{ + "shape":"ExportManifest", + "documentation":"

The name of the manifest file for the export task.

" + }, + "TableArn":{ + "shape":"TableArn", + "documentation":"

The Amazon Resource Name (ARN) of the table that was exported.

" + }, + "TableId":{ + "shape":"TableId", + "documentation":"

Unique ID of the table that was exported.

" + }, + "ExportTime":{ + "shape":"ExportTime", + "documentation":"

Point in time from which table data was exported.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

The client token that was provided for the export task. A client token makes calls to ExportTableToPointInTimeInput idempotent, meaning that multiple identical calls have the same effect as one single call.

" + }, + "S3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the Amazon S3 bucket containing the export.

" + }, + "S3BucketOwner":{ + "shape":"S3BucketOwner", + "documentation":"

The ID of the AWS account that owns the bucket containing the export.

" + }, + "S3Prefix":{ + "shape":"S3Prefix", + "documentation":"

The Amazon S3 bucket prefix used as the file name and path of the exported snapshot.

" + }, + "S3SseAlgorithm":{ + "shape":"S3SseAlgorithm", + "documentation":"

Type of encryption used on the bucket where export data is stored. Valid values for S3SseAlgorithm are:

  • AES256 - server-side encryption with Amazon S3 managed keys

  • KMS - server-side encryption with AWS KMS managed keys

" + }, + "S3SseKmsKeyId":{ + "shape":"S3SseKmsKeyId", + "documentation":"

The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data is stored (if applicable).

" + }, + "FailureCode":{ + "shape":"FailureCode", + "documentation":"

Status code for the result of the failed export.

" + }, + "FailureMessage":{ + "shape":"FailureMessage", + "documentation":"

Export failure reason description.

" + }, + "ExportFormat":{ + "shape":"ExportFormat", + "documentation":"

The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON or ION.

" + }, + "BilledSizeBytes":{ + "shape":"BilledSizeBytes", + "documentation":"

The billable size of the table export.

" + }, + "ItemCount":{ + "shape":"ItemCount", + "documentation":"

The number of items exported.

" + } + }, + "documentation":"

Represents the properties of the exported table.

" + }, + "ExportEndTime":{"type":"timestamp"}, + "ExportFormat":{ + "type":"string", + "enum":[ + "DYNAMODB_JSON", + "ION" + ] + }, + "ExportManifest":{"type":"string"}, + "ExportNextToken":{"type":"string"}, + "ExportNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified export was not found.

", + "exception":true + }, + "ExportStartTime":{"type":"timestamp"}, + "ExportStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, + "ExportSummaries":{ + "type":"list", + "member":{"shape":"ExportSummary"} + }, + "ExportSummary":{ + "type":"structure", + "members":{ + "ExportArn":{ + "shape":"ExportArn", + "documentation":"

The Amazon Resource Name (ARN) of the export.

" + }, + "ExportStatus":{ + "shape":"ExportStatus", + "documentation":"

Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.

" + } + }, + "documentation":"

Summary information about an export task.

" + }, + "ExportTableToPointInTimeInput":{ + "type":"structure", + "required":[ + "TableArn", + "S3Bucket" + ], + "members":{ + "TableArn":{ + "shape":"TableArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the table to export.

" + }, + "ExportTime":{ + "shape":"ExportTime", + "documentation":"

Time in the past from which to export table data. The table export will be a snapshot of the table's state at this point in time.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

Providing a ClientToken makes the call to ExportTableToPointInTimeInput idempotent, meaning that multiple identical calls have the same effect as one single call.

A client token is valid for 8 hours after the first request that uses it is completed. After 8 hours, any request with the same client token is treated as a new request. Do not resubmit the same request with the same client token for more than 8 hours, or the result might not be idempotent.

If you submit a request with the same client token but a change in other parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch exception.

", + "idempotencyToken":true + }, + "S3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the Amazon S3 bucket to export the snapshot to.

" + }, + "S3BucketOwner":{ + "shape":"S3BucketOwner", + "documentation":"

The ID of the AWS account that owns the bucket the export will be stored in.

" + }, + "S3Prefix":{ + "shape":"S3Prefix", + "documentation":"

The Amazon S3 bucket prefix to use as the file name and path of the exported snapshot.

" + }, + "S3SseAlgorithm":{ + "shape":"S3SseAlgorithm", + "documentation":"

Type of encryption used on the bucket where export data will be stored. Valid values for S3SseAlgorithm are:

  • AES256 - server-side encryption with Amazon S3 managed keys

  • KMS - server-side encryption with AWS KMS managed keys

" + }, + "S3SseKmsKeyId":{ + "shape":"S3SseKmsKeyId", + "documentation":"

The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data will be stored (if applicable).

" + }, + "ExportFormat":{ + "shape":"ExportFormat", + "documentation":"

The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON or ION.

" + } + } + }, + "ExportTableToPointInTimeOutput":{ + "type":"structure", + "members":{ + "ExportDescription":{ + "shape":"ExportDescription", + "documentation":"

Contains a description of the table export.

" + } + } + }, + "ExportTime":{"type":"timestamp"}, "ExpressionAttributeNameMap":{ "type":"map", "key":{"shape":"ExpressionAttributeNameVariable"}, @@ -2180,6 +2738,7 @@ "value":{"shape":"AttributeValue"} }, "ExpressionAttributeValueVariable":{"type":"string"}, + "FailureCode":{"type":"string"}, "FailureException":{ "type":"structure", "members":{ @@ -2194,6 +2753,7 @@ }, "documentation":"

Represents a failure of a contributor insights operation.

" }, + "FailureMessage":{"type":"string"}, "FilterConditionMap":{ "type":"map", "key":{"shape":"AttributeName"}, @@ -2555,6 +3115,14 @@ "exception":true, "fault":true }, + "InvalidExportTimeException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified ExportTime is outside of the point in time recovery window.

", + "exception":true + }, "InvalidRestoreTimeException":{ "type":"structure", "members":{ @@ -2713,6 +3281,62 @@ }, "documentation":"

Represents a set of primary keys and, for each key, the attributes to retrieve from the table.

For each primary key, you must provide all of the key attributes. For example, with a simple primary key, you only need to provide the partition key. For a composite primary key, you must provide both the partition key and the sort key.

" }, + "KinesisDataStreamDestination":{ + "type":"structure", + "members":{ + "StreamArn":{ + "shape":"StreamArn", + "documentation":"

The ARN for a specific Kinesis data stream.

" + }, + "DestinationStatus":{ + "shape":"DestinationStatus", + "documentation":"

The current status of replication.

" + }, + "DestinationStatusDescription":{ + "shape":"String", + "documentation":"

The human-readable string that corresponds to the replica status.

" + } + }, + "documentation":"

Describes a Kinesis data stream destination.

" + }, + "KinesisDataStreamDestinations":{ + "type":"list", + "member":{"shape":"KinesisDataStreamDestination"} + }, + "KinesisStreamingDestinationInput":{ + "type":"structure", + "required":[ + "TableName", + "StreamArn" + ], + "members":{ + "TableName":{ + "shape":"TableName", + "documentation":"

The name of the DynamoDB table.

" + }, + "StreamArn":{ + "shape":"StreamArn", + "documentation":"

The ARN for a Kinesis data stream.

" + } + } + }, + "KinesisStreamingDestinationOutput":{ + "type":"structure", + "members":{ + "TableName":{ + "shape":"TableName", + "documentation":"

The name of the table being modified.

" + }, + "StreamArn":{ + "shape":"StreamArn", + "documentation":"

The ARN for the specific Kinesis data stream.

" + }, + "DestinationStatus":{ + "shape":"DestinationStatus", + "documentation":"

The current status of the replication.

" + } + } + }, "LastUpdateDateTime":{"type":"timestamp"}, "LimitExceededException":{ "type":"structure", @@ -2805,6 +3429,41 @@ } } }, + "ListExportsInput":{ + "type":"structure", + "members":{ + "TableArn":{ + "shape":"TableArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the exported table.

" + }, + "MaxResults":{ + "shape":"ListExportsMaxLimit", + "documentation":"

Maximum number of results to return per page.

" + }, + "NextToken":{ + "shape":"ExportNextToken", + "documentation":"

An optional string that, if supplied, must be copied from the output of a previous call to ListExports. When provided in this manner, the API fetches the next page of results.

" + } + } + }, + "ListExportsMaxLimit":{ + "type":"integer", + "max":25, + "min":1 + }, + "ListExportsOutput":{ + "type":"structure", + "members":{ + "ExportSummaries":{ + "shape":"ExportSummaries", + "documentation":"

A list of ExportSummary objects.

" + }, + "NextToken":{ + "shape":"ExportNextToken", + "documentation":"

If this value is returned, there are additional results to be displayed. To retrieve them, call ListExports again, with NextToken set to this value.

" + } + } + }, "ListGlobalTablesInput":{ "type":"structure", "members":{ @@ -3006,6 +3665,47 @@ "type":"list", "member":{"shape":"NumberAttributeValue"} }, + "ParameterizedStatement":{ + "type":"structure", + "required":["Statement"], + "members":{ + "Statement":{ + "shape":"PartiQLStatement", + "documentation":"

A PartiQL statement that uses parameters.

" + }, + "Parameters":{ + "shape":"PreparedStatementParameters", + "documentation":"

The parameter values.

" + } + }, + "documentation":"

Represents a PartiQL statement that uses parameters.

" + }, + "ParameterizedStatements":{ + "type":"list", + "member":{"shape":"ParameterizedStatement"}, + "max":25, + "min":1 + }, + "PartiQLBatchRequest":{ + "type":"list", + "member":{"shape":"BatchStatementRequest"}, + "max":25, + "min":1 + }, + "PartiQLBatchResponse":{ + "type":"list", + "member":{"shape":"BatchStatementResponse"} + }, + "PartiQLNextToken":{ + "type":"string", + "max":32768, + "min":1 + }, + "PartiQLStatement":{ + "type":"string", + "max":8192, + "min":1 + }, "PointInTimeRecoveryDescription":{ "type":"structure", "members":{ @@ -3058,6 +3758,11 @@ "type":"long", "min":1 }, + "PreparedStatementParameters":{ + "type":"list", + "member":{"shape":"AttributeValue"}, + "min":1 + }, "Projection":{ "type":"structure", "members":{ @@ -3432,7 +4137,7 @@ }, "ReplicaStatus":{ "shape":"ReplicaStatus", - "documentation":"

The current state of the replica:

  • CREATING - The replica is being created.

  • UPDATING - The replica is being updated.

  • DELETING - The replica is being deleted.

  • ACTIVE - The replica is ready for use.

  • REGION_DISABLED - The replica is inaccessible because the AWS Region has been disabled.

    If the AWS Region remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.

" + "documentation":"

The current state of the replica:

  • CREATING - The replica is being created.

  • UPDATING - The replica is being updated.

  • DELETING - The replica is being deleted.

  • ACTIVE - The replica is ready for use.

  • REGION_DISABLED - The replica is inaccessible because the AWS Region has been disabled.

    If the AWS Region remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.

  • INACCESSIBLE_ENCRYPTION_CREDENTIALS - The AWS KMS key used to encrypt the table is inaccessible.

    If the AWS KMS key remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.

" }, "ReplicaStatusDescription":{ "shape":"ReplicaStatusDescription", @@ -3690,7 +4395,8 @@ "UPDATING", "DELETING", "ACTIVE", - "REGION_DISABLED" + "REGION_DISABLED", + "INACCESSIBLE_ENCRYPTION_CREDENTIALS" ] }, "ReplicaStatusDescription":{"type":"string"}, @@ -3932,6 +4638,21 @@ "NONE" ] }, + "S3Bucket":{"type":"string"}, + "S3BucketOwner":{"type":"string"}, + "S3Prefix":{"type":"string"}, + "S3SseAlgorithm":{ + "type":"string", + "enum":[ + "AES256", + "KMS" + ] + }, + "S3SseKmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, "SSEDescription":{ "type":"structure", "members":{ diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index 64e861ff438e..76583a2b901b 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index 96e44dd4f99d..0d5f786b005c 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 812425860223..35b3754f39c6 100755 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -151,6 +151,16 @@ "input":{"shape":"AssociateDhcpOptionsRequest"}, "documentation":"

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" }, + "AssociateEnclaveCertificateIamRole":{ + "name":"AssociateEnclaveCertificateIamRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateEnclaveCertificateIamRoleRequest"}, + "output":{"shape":"AssociateEnclaveCertificateIamRoleResult"}, + "documentation":"

Associates an AWS Identity and Access Management (IAM) role with an AWS Certificate Manager (ACM) certificate. This enables the certificate to be used by the ACM for Nitro Enclaves application inside an enclave. For more information, see AWS Certificate Manager for Nitro Enclaves in the AWS Nitro Enclaves User Guide.

When the IAM role is associated with the ACM certificate, the certificate, certificate chain, and encrypted private key are placed in an Amazon S3 bucket that only the associated IAM role can access. The private key of the certificate is encrypted with an AWS-managed KMS customer master key (CMK) that has an attached attestation-based CMK policy.

To enable the IAM role to access the Amazon S3 object, you must grant it permission to call s3:GetObject on the Amazon S3 bucket returned by the command. To enable the IAM role to access the AWS KMS CMK, you must grant it permission to call kms:Decrypt on the AWS KMS CMK returned by the command. For more information, see Grant the role permission to access the certificate and encryption key in the AWS Nitro Enclaves User Guide.

" + }, "AssociateIamInstanceProfile":{ "name":"AssociateIamInstanceProfile", "http":{ @@ -248,7 +258,7 @@ }, "input":{"shape":"AttachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

After you attach an EBS volume, you must make it available. For more information, see Making an EBS volume available for use.

If a volume has an AWS Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • AWS Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information, see Attaching Amazon EBS volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

After you attach an EBS volume, you must make it available. For more information, see Making an EBS volume available for use.

If a volume has an AWS Marketplace product code:

  • The volume can be attached only to a stopped instance.

  • AWS Marketplace product codes are copied from the volume to the instance.

  • You must be subscribed to the product.

  • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

For more information, see Attaching Amazon EBS volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "AttachVpnGateway":{ "name":"AttachVpnGateway", @@ -733,7 +743,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.
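A minimal AWS SDK for Java v2 sketch of this operation, assuming a placeholder volume ID and an illustrative Name tag (the tag and description are not part of the model itself).

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.CreateSnapshotRequest;
import software.amazon.awssdk.services.ec2.model.CreateSnapshotResponse;
import software.amazon.awssdk.services.ec2.model.ResourceType;
import software.amazon.awssdk.services.ec2.model.Tag;
import software.amazon.awssdk.services.ec2.model.TagSpecification;

public class CreateSnapshotExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            // Snapshot an EBS volume and tag it at creation time.
            CreateSnapshotResponse snapshot = ec2.createSnapshot(CreateSnapshotRequest.builder()
                    .volumeId("vol-0123456789abcdef0")             // placeholder volume ID
                    .description("Backup before instance shutdown")
                    .tagSpecifications(TagSpecification.builder()
                            .resourceType(ResourceType.SNAPSHOT)
                            .tags(Tag.builder().key("Name").value("nightly-backup").build())
                            .build())
                    .build());
            // The snapshot can be used once its state moves past 'pending'.
            System.out.println(snapshot.snapshotId() + " is " + snapshot.stateAsString());
        }
    }
}
```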

" }, "CreateSnapshots":{ "name":"CreateSnapshots", @@ -892,7 +902,7 @@ }, "input":{"shape":"CreateVolumeRequest"}, "output":{"shape":"Volume"}, - "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone. The volume is created in the regional endpoint that you send the HTTP request to. For more information see Regions and Endpoints.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Creates an EBS volume that can be attached to an instance in the same Availability Zone.

You can create a new empty volume or restore a volume from an EBS snapshot. Any AWS Marketplace product codes from the snapshot are propagated to the volume.

You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

You can tag your volumes during creation. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

For more information, see Creating an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.
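As a hedged illustration of the generated client for this operation, the sketch below creates an encrypted gp2 volume; the Availability Zone and size are placeholder values, and omitting KmsKeyId means the default CMK for EBS is used.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.CreateVolumeRequest;
import software.amazon.awssdk.services.ec2.model.CreateVolumeResponse;
import software.amazon.awssdk.services.ec2.model.VolumeType;

public class CreateVolumeExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            CreateVolumeResponse volume = ec2.createVolume(CreateVolumeRequest.builder()
                    .availabilityZone("us-east-1a")   // placeholder Availability Zone
                    .size(100)                        // size in GiB
                    .volumeType(VolumeType.GP2)
                    .encrypted(true)                  // uses the default CMK unless kmsKeyId is set
                    .build());
            System.out.println(volume.volumeId() + " is " + volume.stateAsString());
        }
    }
}
```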

" }, "CreateVpc":{ "name":"CreateVpc", @@ -912,7 +922,7 @@ }, "input":{"shape":"CreateVpcEndpointRequest"}, "output":{"shape":"CreateVpcEndpointResult"}, - "documentation":"

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace Partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint, which will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

Use DescribeVpcEndpointServices to get a list of supported services.

" + "documentation":"

Creates a VPC endpoint for a specified service. An endpoint enables you to create a private connection between your VPC and the service. The service may be provided by AWS, an AWS Marketplace Partner, or another AWS account. For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

A gateway endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint, which will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.

An interface endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.

A Gateway Load Balancer endpoint is a network interface in your subnet that serves as an endpoint for communicating with a Gateway Load Balancer that you've configured as a VPC endpoint service.

Use DescribeVpcEndpointServices to get a list of supported services.
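For orientation, a small SDK for Java v2 sketch that creates a gateway endpoint for Amazon S3; the VPC and route table IDs are placeholders, and the Region embedded in the service name is assumed to be us-east-1.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.CreateVpcEndpointRequest;
import software.amazon.awssdk.services.ec2.model.CreateVpcEndpointResponse;
import software.amazon.awssdk.services.ec2.model.VpcEndpointType;

public class CreateVpcEndpointExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            CreateVpcEndpointResponse endpoint = ec2.createVpcEndpoint(CreateVpcEndpointRequest.builder()
                    .vpcId("vpc-0123456789abcdef0")               // placeholder VPC ID
                    .serviceName("com.amazonaws.us-east-1.s3")    // assumed Region in the service name
                    .vpcEndpointType(VpcEndpointType.GATEWAY)
                    .routeTableIds("rtb-0123456789abcdef0")       // route tables that will use the endpoint
                    .build());
            System.out.println("Created " + endpoint.vpcEndpoint().vpcEndpointId());
        }
    }
}
```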

" }, "CreateVpcEndpointConnectionNotification":{ "name":"CreateVpcEndpointConnectionNotification", @@ -932,7 +942,7 @@ }, "input":{"shape":"CreateVpcEndpointServiceConfigurationRequest"}, "output":{"shape":"CreateVpcEndpointServiceConfigurationResult"}, - "documentation":"

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect. Service consumers can create an interface VPC endpoint to connect to your service.

To create an endpoint service configuration, you must first create a Network Load Balancer for your service. For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

If you set the private DNS name, you must prove that you own the private DNS domain name. For more information, see VPC Endpoint Service Private DNS Name Verification in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a VPC endpoint service configuration to which service consumers (AWS accounts, IAM users, and IAM roles) can connect.

To create an endpoint service configuration, you must first create one of the following for your service:

  • A Network Load Balancer. Service consumers connect to your service using an interface endpoint.

  • A Gateway Load Balancer. Service consumers connect to your service using a Gateway Load Balancer endpoint.

For more information, see VPC Endpoint Services in the Amazon Virtual Private Cloud User Guide.

If you set the private DNS name, you must prove that you own the private DNS domain name. For more information, see VPC Endpoint Service Private DNS Name Verification in the Amazon Virtual Private Cloud User Guide.

" }, "CreateVpcPeeringConnection":{ "name":"CreateVpcPeeringConnection", @@ -1039,7 +1049,7 @@ }, "input":{"shape":"DeleteFleetsRequest"}, "output":{"shape":"DeleteFleetsResult"}, - "documentation":"

Deletes the specified EC2 Fleet.

After you delete an EC2 Fleet, it launches no new instances. You must specify whether an EC2 Fleet should also terminate its instances. If you terminate the instances, the EC2 Fleet enters the deleted_terminating state. Otherwise, the EC2 Fleet enters the deleted_running state, and the instances continue to run until they are interrupted or you terminate them manually.

" + "documentation":"

Deletes the specified EC2 Fleet.

After you delete an EC2 Fleet, it launches no new instances.

You must specify whether a deleted EC2 Fleet should also terminate its instances. If you choose to terminate the instances, the EC2 Fleet enters the deleted_terminating state. Otherwise, the EC2 Fleet enters the deleted_running state, and the instances continue to run until they are interrupted or you terminate them manually.

For instant fleets, EC2 Fleet must terminate the instances when the fleet is deleted. A deleted instant fleet with running instances is not supported.

Restrictions

  • You can delete up to 25 instant fleets in a single request. If you exceed this number, no instant fleets are deleted and an error is returned. There is no restriction on the number of fleets of type maintain or request that can be deleted in a single request.

  • Up to 1000 instances can be terminated in a single request to delete instant fleets.

For more information, see Deleting an EC2 Fleet in the Amazon Elastic Compute Cloud User Guide.
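A brief sketch of how this maps onto the generated Java client, assuming a placeholder fleet ID and choosing to terminate the fleet's instances.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DeleteFleetsRequest;
import software.amazon.awssdk.services.ec2.model.DeleteFleetsResponse;

public class DeleteFleetsExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            DeleteFleetsResponse result = ec2.deleteFleets(DeleteFleetsRequest.builder()
                    .fleetIds("fleet-12345678-1234-1234-1234-123456789012")  // placeholder fleet ID
                    .terminateInstances(true)   // required: decide whether instances are terminated
                    .build());
            result.successfulFleetDeletions().forEach(d ->
                    System.out.println(d.fleetId() + " -> " + d.currentFleetStateAsString()));
        }
    }
}
```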

" }, "DeleteFlowLogs":{ "name":"DeleteFlowLogs", @@ -1414,7 +1424,7 @@ }, "input":{"shape":"DeleteVpcEndpointsRequest"}, "output":{"shape":"DeleteVpcEndpointsResult"}, - "documentation":"

Deletes one or more specified VPC endpoints. Deleting a gateway endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint. Deleting an interface endpoint deletes the endpoint network interfaces.

" + "documentation":"

Deletes one or more specified VPC endpoints. Deleting a gateway endpoint also deletes the endpoint routes in the route tables that were associated with the endpoint. Deleting an interface endpoint or a Gateway Load Balancer endpoint deletes the endpoint network interfaces. Gateway Load Balancer endpoints can only be deleted if the routes that are associated with the endpoint are deleted.

" }, "DeleteVpcPeeringConnection":{ "name":"DeleteVpcPeeringConnection", @@ -2290,7 +2300,7 @@ }, "input":{"shape":"DescribeSnapshotAttributeRequest"}, "output":{"shape":"DescribeSnapshotAttributeResult"}, - "documentation":"

Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified attribute of the specified snapshot. You can specify only one attribute at a time.

For more information about EBS snapshots, see Amazon EBS snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeSnapshots":{ "name":"DescribeSnapshots", @@ -2300,7 +2310,7 @@ }, "input":{"shape":"DescribeSnapshotsRequest"}, "output":{"shape":"DescribeSnapshotsResult"}, - "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other AWS accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

  • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.

  • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.

  • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other AWS accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

  • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.

  • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.

  • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS snapshots in the Amazon Elastic Compute Cloud User Guide.
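Because this operation is paginated, the generated client also exposes a paginator that follows NextToken for you; a minimal sketch (owner self, placeholder page size) might look like the following.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeSnapshotsRequest;

public class DescribeSnapshotsExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            // The paginator retrieves successive pages until NextToken is exhausted.
            ec2.describeSnapshotsPaginator(DescribeSnapshotsRequest.builder()
                            .ownerIds("self")
                            .maxResults(100)   // page size per request
                            .build())
                    .snapshots()
                    .forEach(s -> System.out.println(s.snapshotId() + " started " + s.startTime()));
        }
    }
}
```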

" }, "DescribeSpotDatafeedSubscription":{ "name":"DescribeSpotDatafeedSubscription", @@ -2490,7 +2500,7 @@ }, "input":{"shape":"DescribeVolumeAttributeRequest"}, "output":{"shape":"DescribeVolumeAttributeResult"}, - "documentation":"

Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified attribute of the specified volume. You can specify only one attribute at a time.

For more information about EBS volumes, see Amazon EBS volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVolumeStatus":{ "name":"DescribeVolumeStatus", @@ -2500,7 +2510,7 @@ }, "input":{"shape":"DescribeVolumeStatusRequest"}, "output":{"shape":"DescribeVolumeStatusResult"}, - "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired , warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks may still be taking place on your volume at the time. We recommend that you retry the request. For more information about volume status, see Monitoring the status of your volumes in the Amazon Elastic Compute Cloud User Guide.

Events: Reflect the cause of a volume status and may require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and may have inconsistent data.

Actions: Reflect the actions you may have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O.)

" + "documentation":"

Describes the status of the specified volumes. Volume status provides the result of the checks performed on your volumes to determine events that can impair the performance of your volumes. The performance of a volume can be affected if an issue occurs on the volume's underlying host. If the volume's underlying host experiences a power outage or system issue, after the system is restored, there could be data inconsistencies on the volume. Volume events notify you if this occurs. Volume actions notify you if any action needs to be taken in response to the event.

The DescribeVolumeStatus operation provides the following information about the specified volumes:

Status: Reflects the current status of the volume. The possible values are ok, impaired, warning, or insufficient-data. If all checks pass, the overall status of the volume is ok. If the check fails, the overall status is impaired. If the status is insufficient-data, then the checks might still be taking place on your volume at the time. We recommend that you retry the request. For more information about volume status, see Monitoring the status of your volumes in the Amazon Elastic Compute Cloud User Guide.

Events: Reflect the cause of a volume status and might require you to take action. For example, if your volume returns an impaired status, then the volume event might be potential-data-inconsistency. This means that your volume has been affected by an issue with the underlying host, has all I/O operations disabled, and might have inconsistent data.

Actions: Reflect the actions you might have to take in response to an event. For example, if the status of the volume is impaired and the volume event shows potential-data-inconsistency, then the action shows enable-volume-io. This means that you may want to enable the I/O operations for the volume by calling the EnableVolumeIO action and then check the volume for data consistency.

Volume status is based on the volume status checks, and does not reflect the volume state. Therefore, volume status does not indicate volumes in the error state (for example, when a volume is incapable of accepting I/O).
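A short sketch of reading the status, events, and actions described above with the generated Java client; the volume ID is a placeholder.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeVolumeStatusRequest;
import software.amazon.awssdk.services.ec2.model.DescribeVolumeStatusResponse;
import software.amazon.awssdk.services.ec2.model.VolumeStatusItem;

public class DescribeVolumeStatusExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            DescribeVolumeStatusResponse status = ec2.describeVolumeStatus(DescribeVolumeStatusRequest.builder()
                    .volumeIds("vol-0123456789abcdef0")   // placeholder volume ID
                    .build());
            for (VolumeStatusItem item : status.volumeStatuses()) {
                // Overall status: ok, impaired, warning, or insufficient-data.
                System.out.println(item.volumeId() + ": " + item.volumeStatus().statusAsString());
                item.events().forEach(e -> System.out.println("  event:  " + e.eventType()));
                item.actions().forEach(a -> System.out.println("  action: " + a.code()));
            }
        }
    }
}
```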

" }, "DescribeVolumes":{ "name":"DescribeVolumes", @@ -2510,7 +2520,7 @@ }, "input":{"shape":"DescribeVolumesRequest"}, "output":{"shape":"DescribeVolumesResult"}, - "documentation":"

Describes the specified EBS volumes or all of your EBS volumes.

If you are describing a long list of volumes, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS volumes or all of your EBS volumes.

If you are describing a long list of volumes, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVolumesModifications":{ "name":"DescribeVolumesModifications", @@ -2600,7 +2610,7 @@ }, "input":{"shape":"DescribeVpcEndpointServicesRequest"}, "output":{"shape":"DescribeVpcEndpointServicesResult"}, - "documentation":"

Describes available services to which you can create a VPC endpoint.

" + "documentation":"

Describes available services to which you can create a VPC endpoint.

When the service provider and the consumer have different accounts and multiple Availability Zones, and the consumer views the VPC endpoint service information, the response only includes the common Availability Zones. For example, when the service provider account uses us-east-1a and us-east-1c and the consumer uses us-east-1a and us-east-1b, the response includes the VPC endpoint services in the common Availability Zone, us-east-1a.
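For reference, a minimal Java v2 sketch that lists the available endpoint services along with the Availability Zones reported for each; no request parameters are required.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeVpcEndpointServicesRequest;
import software.amazon.awssdk.services.ec2.model.DescribeVpcEndpointServicesResponse;

public class DescribeVpcEndpointServicesExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            DescribeVpcEndpointServicesResponse services = ec2.describeVpcEndpointServices(
                    DescribeVpcEndpointServicesRequest.builder().build());
            // Only the Availability Zones common to provider and consumer are returned.
            services.serviceDetails().forEach(s ->
                    System.out.println(s.serviceName() + " " + s.availabilityZones()));
        }
    }
}
```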

" }, "DescribeVpcEndpoints":{ "name":"DescribeVpcEndpoints", @@ -2707,7 +2717,7 @@ }, "input":{"shape":"DisableEbsEncryptionByDefaultRequest"}, "output":{"shape":"DisableEbsEncryptionByDefaultResult"}, - "documentation":"

Disables EBS encryption by default for your account in the current Region.

After you disable encryption by default, you can still create encrypted volumes by enabling encryption when you create each volume.

Disabling encryption by default does not change the encryption status of your existing volumes.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Disables EBS encryption by default for your account in the current Region.

After you disable encryption by default, you can still create encrypted volumes by enabling encryption when you create each volume.

Disabling encryption by default does not change the encryption status of your existing volumes.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "DisableFastSnapshotRestores":{ "name":"DisableFastSnapshotRestores", @@ -2777,6 +2787,16 @@ "output":{"shape":"DisassociateClientVpnTargetNetworkResult"}, "documentation":"

Disassociates a target network from the specified Client VPN endpoint. When you disassociate the last target network from a Client VPN, the following happens:

  • The route that was automatically added for the VPC is deleted

  • All active client connections are terminated

  • New client connections are disallowed

  • The Client VPN endpoint's status changes to pending-associate

" }, + "DisassociateEnclaveCertificateIamRole":{ + "name":"DisassociateEnclaveCertificateIamRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateEnclaveCertificateIamRoleRequest"}, + "output":{"shape":"DisassociateEnclaveCertificateIamRoleResult"}, + "documentation":"

Disassociates an IAM role from an AWS Certificate Manager (ACM) certificate. Disassociating an IAM role from an ACM certificate removes the Amazon S3 object that contains the certificate, certificate chain, and encrypted private key from the Amazon S3 bucket. It also revokes the IAM role's permission to use the AWS Key Management Service (KMS) customer master key (CMK) used to encrypt the private key. This effectively revokes the role's permission to use the certificate.

" + }, "DisassociateIamInstanceProfile":{ "name":"DisassociateIamInstanceProfile", "http":{ @@ -2844,7 +2864,7 @@ }, "input":{"shape":"EnableEbsEncryptionByDefaultRequest"}, "output":{"shape":"EnableEbsEncryptionByDefaultResult"}, - "documentation":"

Enables EBS encryption by default for your account in the current Region.

After you enable encryption by default, the EBS volumes that you create are are always encrypted, either using the default CMK or the CMK that you specified when you created each volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

Enabling encryption by default has no effect on the encryption status of your existing volumes.

After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported instance types.

" + "documentation":"

Enables EBS encryption by default for your account in the current Region.

After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default CMK or the CMK that you specified when you created each volume. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

Enabling encryption by default has no effect on the encryption status of your existing volumes.

After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported instance types.

" }, "EnableFastSnapshotRestores":{ "name":"EnableFastSnapshotRestores", @@ -2944,6 +2964,16 @@ "output":{"shape":"ExportTransitGatewayRoutesResult"}, "documentation":"

Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range.

The routes are saved to the specified bucket in a JSON file. For more information, see Export Route Tables to Amazon S3 in Transit Gateways.

" }, + "GetAssociatedEnclaveCertificateIamRoles":{ + "name":"GetAssociatedEnclaveCertificateIamRoles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAssociatedEnclaveCertificateIamRolesRequest"}, + "output":{"shape":"GetAssociatedEnclaveCertificateIamRolesResult"}, + "documentation":"

Returns the IAM roles that are associated with the specified AWS Certificate Manager (ACM) certificate. It also returns the name of the Amazon S3 bucket and the Amazon S3 object key where the certificate, certificate chain, and encrypted private key bundle are stored, and the ARN of the AWS Key Management Service (KMS) customer master key (CMK) that's used to encrypt the private key.

" + }, "GetAssociatedIpv6PoolCidrs":{ "name":"GetAssociatedIpv6PoolCidrs", "http":{ @@ -3012,7 +3042,7 @@ }, "input":{"shape":"GetEbsDefaultKmsKeyIdRequest"}, "output":{"shape":"GetEbsDefaultKmsKeyIdResult"}, - "documentation":"

Describes the default customer master key (CMK) for EBS encryption by default for your account in this Region. You can change the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the default customer master key (CMK) for EBS encryption by default for your account in this Region. You can change the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "GetEbsEncryptionByDefault":{ "name":"GetEbsEncryptionByDefault", @@ -3022,7 +3052,7 @@ }, "input":{"shape":"GetEbsEncryptionByDefaultRequest"}, "output":{"shape":"GetEbsEncryptionByDefaultResult"}, - "documentation":"

Describes whether EBS encryption by default is enabled for your account in the current Region.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes whether EBS encryption by default is enabled for your account in the current Region.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "GetGroupsForCapacityReservation":{ "name":"GetGroupsForCapacityReservation", @@ -3252,7 +3282,7 @@ }, "input":{"shape":"ModifyEbsDefaultKmsKeyIdRequest"}, "output":{"shape":"ModifyEbsDefaultKmsKeyIdResult"}, - "documentation":"

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a symmetric customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric CMKs.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Changes the default customer master key (CMK) for EBS encryption by default for your account in this Region.

AWS creates a unique AWS managed CMK in each Region for use with encryption by default. If you change the default CMK to a symmetric customer managed CMK, it is used instead of the AWS managed CMK. To reset the default CMK to the AWS managed CMK for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric CMKs.

If you delete or disable the customer managed CMK that you specified for use with encryption by default, your instances will fail to launch.

For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyFleet":{ "name":"ModifyFleet", @@ -3505,7 +3535,7 @@ }, "input":{"shape":"ModifyVolumeRequest"}, "output":{"shape":"ModifyVolumeResult"}, - "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you may be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the size, IOPS, or type of an EBS volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the size, IOPS, or type of an EBS volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux file system. For information about extending a Windows file system, see Extending a Windows file system.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitoring volume modifications.

With previous-generation instance types, resizing an EBS volume may require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Modifying the size, IOPS, or type of an EBS volume on Linux and Modifying the size, IOPS, or type of an EBS volume on Windows.

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.

" + "documentation":"

You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying an EBS volume running Linux, see Modifying the size, IOPS, or type of an EBS volume on Linux. For more information about modifying an EBS volume running Windows, see Modifying the size, IOPS, or type of an EBS volume on Windows.

When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For information about extending a Linux file system, see Extending a Linux file system. For information about extending a Windows file system, see Extending a Windows file system.

You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitoring volume modifications.

With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance. For more information, see Amazon EBS Elastic Volumes (Linux) or Amazon EBS Elastic Volumes (Windows).

If you reach the maximum volume modification rate per volume limit, you will need to wait at least six hours before applying further modifications to the affected EBS volume.
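A hedged sketch of a size-only modification followed by progress tracking through DescribeVolumesModifications; the volume ID and target size are placeholders.

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeVolumesModificationsRequest;
import software.amazon.awssdk.services.ec2.model.DescribeVolumesModificationsResponse;
import software.amazon.awssdk.services.ec2.model.ModifyVolumeRequest;
import software.amazon.awssdk.services.ec2.model.ModifyVolumeResponse;

public class ModifyVolumeExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            ModifyVolumeResponse modification = ec2.modifyVolume(ModifyVolumeRequest.builder()
                    .volumeId("vol-0123456789abcdef0")   // placeholder volume ID
                    .size(200)                           // new size in GiB; type and IOPS can be changed the same way
                    .build());
            System.out.println("State: " + modification.volumeModification().modificationStateAsString());

            // Track progress until the modification reaches 'optimizing' or 'completed'.
            DescribeVolumesModificationsResponse progress = ec2.describeVolumesModifications(
                    DescribeVolumesModificationsRequest.builder()
                            .volumeIds("vol-0123456789abcdef0")
                            .build());
            progress.volumesModifications().forEach(m ->
                    System.out.println(m.volumeId() + " " + m.progress() + "% complete"));
        }
    }
}
```

Remember that the file system inside the instance still needs to be extended after the volume modification completes.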

" }, "ModifyVolumeAttribute":{ "name":"ModifyVolumeAttribute", @@ -3533,7 +3563,7 @@ }, "input":{"shape":"ModifyVpcEndpointRequest"}, "output":{"shape":"ModifyVpcEndpointResult"}, - "documentation":"

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface or gateway). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Modifies attributes of a specified VPC endpoint. The attributes that you can modify depend on the type of VPC endpoint (interface, gateway, or Gateway Load Balancer). For more information, see VPC Endpoints in the Amazon Virtual Private Cloud User Guide.

" }, "ModifyVpcEndpointConnectionNotification":{ "name":"ModifyVpcEndpointConnectionNotification", @@ -3553,7 +3583,7 @@ }, "input":{"shape":"ModifyVpcEndpointServiceConfigurationRequest"}, "output":{"shape":"ModifyVpcEndpointServiceConfigurationResult"}, - "documentation":"

Modifies the attributes of your VPC endpoint service configuration. You can change the Network Load Balancers for your service, and you can specify whether acceptance is required for requests to connect to your endpoint service through an interface VPC endpoint.

If you set or modify the private DNS name, you must prove that you own the private DNS domain name. For more information, see VPC Endpoint Service Private DNS Name Verification in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Modifies the attributes of your VPC endpoint service configuration. You can change the Network Load Balancers or Gateway Load Balancers for your service, and you can specify whether acceptance is required for requests to connect to your endpoint service through an interface VPC endpoint.

If you set or modify the private DNS name, you must prove that you own the private DNS domain name. For more information, see VPC Endpoint Service Private DNS Name Verification in the Amazon Virtual Private Cloud User Guide.

" }, "ModifyVpcEndpointServicePermissions":{ "name":"ModifyVpcEndpointServicePermissions", @@ -3692,7 +3722,7 @@ "requestUri":"/" }, "input":{"shape":"RebootInstancesRequest"}, - "documentation":"

Requests a reboot of the specified instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

If an instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

For more information about troubleshooting, see Getting console output and rebooting instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Requests a reboot of the specified instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

If an instance does not cleanly shut down within a few minutes, Amazon EC2 performs a hard reboot.

For more information about troubleshooting, see Getting console output and rebooting instances in the Amazon Elastic Compute Cloud User Guide.

" }, "RegisterImage":{ "name":"RegisterImage", @@ -3888,7 +3918,7 @@ }, "input":{"shape":"ResetEbsDefaultKmsKeyIdRequest"}, "output":{"shape":"ResetEbsDefaultKmsKeyIdResult"}, - "documentation":"

Resets the default customer master key (CMK) for EBS encryption for your account in this Region to the AWS managed CMK for EBS.

After resetting the default CMK to the AWS managed CMK, you can continue to encrypt by a customer managed CMK by specifying it when you create the volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Resets the default customer master key (CMK) for EBS encryption for your account in this Region to the AWS managed CMK for EBS.

After resetting the default CMK to the AWS managed CMK, you can continue to encrypt by a customer managed CMK by specifying it when you create the volume. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

" }, "ResetFpgaImageAttribute":{ "name":"ResetFpgaImageAttribute", @@ -4063,7 +4093,7 @@ }, "input":{"shape":"StartVpcEndpointServicePrivateDnsVerificationRequest"}, "output":{"shape":"StartVpcEndpointServicePrivateDnsVerificationResult"}, - "documentation":"

Initiates the verification process to prove that the service provider owns the private DNS name domain for the endpoint service.

The service provider must successfully perform the verification before the consumer can use the name to access the service.

Before the service provider runs this command, they must add a record to the DNS server. For more information, see Adding a TXT Record to Your Domain's DNS Server in the Amazon VPC User Guide.

" + "documentation":"

Initiates the verification process to prove that the service provider owns the private DNS name domain for the endpoint service.

The service provider must successfully perform the verification before the consumer can use the name to access the service.

Before the service provider runs this command, they must add a record to the DNS server. For more information, see Adding a TXT Record to Your Domain's DNS Server in the Amazon VPC User Guide.

" }, "StopInstances":{ "name":"StopInstances", @@ -4537,7 +4567,7 @@ "documentation":"

[EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address pool.

" }, "PublicIpv4Pool":{ - "shape":"String", + "shape":"Ipv4PoolEc2Id", "documentation":"

The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool. To specify a specific address from the address pool, use the Address parameter instead.

" }, "NetworkBorderGroup":{ @@ -4715,6 +4745,13 @@ "off" ] }, + "ApplianceModeSupportValue":{ + "type":"string", + "enum":[ + "enable", + "disable" + ] + }, "ApplySecurityGroupsToClientVpnTargetNetworkRequest":{ "type":"structure", "required":[ @@ -4981,6 +5018,43 @@ } } }, + "AssociateEnclaveCertificateIamRoleRequest":{ + "type":"structure", + "members":{ + "CertificateArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the ACM certificate with which to associate the IAM role.

" + }, + "RoleArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the IAM role to associate with the ACM certificate. You can associate up to 16 IAM roles with an ACM certificate.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "AssociateEnclaveCertificateIamRoleResult":{ + "type":"structure", + "members":{ + "CertificateS3BucketName":{ + "shape":"String", + "documentation":"

The name of the Amazon S3 bucket to which the certificate was uploaded.

", + "locationName":"certificateS3BucketName" + }, + "CertificateS3ObjectKey":{ + "shape":"String", + "documentation":"

The Amazon S3 object key where the certificate, certificate chain, and encrypted private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn.

", + "locationName":"certificateS3ObjectKey" + }, + "EncryptionKmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the AWS KMS CMK used to encrypt the private key of the certificate.

", + "locationName":"encryptionKmsKeyId" + } + } + }, "AssociateIamInstanceProfileRequest":{ "type":"structure", "required":[ @@ -5200,6 +5274,39 @@ "type":"string", "enum":["vpc"] }, + "AssociatedRole":{ + "type":"structure", + "members":{ + "AssociatedRoleArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the associated IAM role.

", + "locationName":"associatedRoleArn" + }, + "CertificateS3BucketName":{ + "shape":"String", + "documentation":"

The name of the Amazon S3 bucket in which the Amazon S3 object is stored.

", + "locationName":"certificateS3BucketName" + }, + "CertificateS3ObjectKey":{ + "shape":"String", + "documentation":"

The key of the Amazon S3 object where the certificate, certificate chain, and encrypted private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn.

", + "locationName":"certificateS3ObjectKey" + }, + "EncryptionKmsKeyId":{ + "shape":"String", + "documentation":"

The ID of the KMS customer master key (CMK) used to encrypt the private key.

", + "locationName":"encryptionKmsKeyId" + } + }, + "documentation":"

Information about the associated IAM roles.

" + }, + "AssociatedRolesList":{ + "type":"list", + "member":{ + "shape":"AssociatedRole", + "locationName":"item" + } + }, "AssociatedTargetNetwork":{ "type":"structure", "members":{ @@ -5347,6 +5454,10 @@ "shape":"NetworkInterfaceId", "documentation":"

The ID of the network interface.

", "locationName":"networkInterfaceId" + }, + "NetworkCardIndex":{ + "shape":"Integer", + "documentation":"

The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0.

" } }, "documentation":"

Contains the parameters for AttachNetworkInterface.

" @@ -5358,6 +5469,11 @@ "shape":"String", "documentation":"

The ID of the network interface attachment.

", "locationName":"attachmentId" + }, + "NetworkCardIndex":{ + "shape":"Integer", + "documentation":"

The index of the network card.

", + "locationName":"networkCardIndex" } }, "documentation":"

Contains the output of AttachNetworkInterface.

" @@ -6507,7 +6623,7 @@ "locationName":"usageStrategy" } }, - "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

This strategy can only be used if the EC2 Fleet is of type instant.

For more information about Capacity Reservations, see On-Demand Capacity Reservations in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

This strategy can only be used if the EC2 Fleet is of type instant.

For more information about Capacity Reservations, see On-Demand Capacity Reservations in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity Reservations in an EC2 Fleet, see EC2 Fleet example configurations in the Amazon Elastic Compute Cloud User Guide.

" }, "CapacityReservationOptionsRequest":{ "type":"structure", @@ -6517,7 +6633,7 @@ "documentation":"

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price or prioritized) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price or prioritized).

If you do not specify a value, the fleet fulfils the On-Demand capacity according to the chosen On-Demand allocation strategy.

" } }, - "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

This strategy can only be used if the EC2 Fleet is of type instant.

For more information about Capacity Reservations, see On-Demand Capacity Reservations in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.

This strategy can only be used if the EC2 Fleet is of type instant.

For more information about Capacity Reservations, see On-Demand Capacity Reservations in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity Reservations in an EC2 Fleet, see EC2 Fleet example configurations in the Amazon Elastic Compute Cloud User Guide.

" }, "CapacityReservationPreference":{ "type":"string", @@ -6834,6 +6950,41 @@ "active" ] }, + "ClientConnectOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether client connect options are enabled. The default is false (not enabled).

" + }, + "LambdaFunctionArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Lambda function used for connection authorization.

" + } + }, + "documentation":"

The options for managing connection authorization for new client connections.

" + }, + "ClientConnectResponseOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether client connect options are enabled.

", + "locationName":"enabled" + }, + "LambdaFunctionArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Lambda function used for connection authorization.

", + "locationName":"lambdaFunctionArn" + }, + "Status":{ + "shape":"ClientVpnEndpointAttributeStatus", + "documentation":"

The status of any updates to the client connect options.

", + "locationName":"status" + } + }, + "documentation":"

The options for managing connection authorization for new client connections.

" + }, "ClientData":{ "type":"structure", "members":{ @@ -6881,7 +7032,7 @@ "locationName":"federatedAuthentication" } }, - "documentation":"

Describes the authentication methods used by a Client VPN endpoint. For more information, see Authentication in the AWS Client VPN Administrator Guide.

" + "documentation":"

Describes the authentication methods used by a Client VPN endpoint. For more information, see Authentication in the AWS Client VPN Administrator Guide.

" }, "ClientVpnAuthenticationList":{ "type":"list", @@ -7016,6 +7167,11 @@ "shape":"String", "documentation":"

The date and time the client connection was terminated.

", "locationName":"connectionEndTime" + }, + "PostureComplianceStatuses":{ + "shape":"ValueStringList", + "documentation":"

The statuses returned by the client connect handler for posture compliance, if applicable.

", + "locationName":"postureComplianceStatusSet" } }, "documentation":"

Describes a client connection.

" @@ -7151,10 +7307,43 @@ "shape":"VpcId", "documentation":"

The ID of the VPC.

", "locationName":"vpcId" + }, + "SelfServicePortalUrl":{ + "shape":"String", + "documentation":"

The URL of the self-service portal.

", + "locationName":"selfServicePortalUrl" + }, + "ClientConnectOptions":{ + "shape":"ClientConnectResponseOptions", + "documentation":"

The options for managing connection authorization for new client connections.

", + "locationName":"clientConnectOptions" } }, "documentation":"

Describes a Client VPN endpoint.

" }, + "ClientVpnEndpointAttributeStatus":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"ClientVpnEndpointAttributeStatusCode", + "documentation":"

The status code.

", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "documentation":"

The status message.

", + "locationName":"message" + } + }, + "documentation":"

Describes the status of the Client VPN endpoint attribute.

" + }, + "ClientVpnEndpointAttributeStatusCode":{ + "type":"string", + "enum":[ + "applying", + "applied" + ] + }, "ClientVpnEndpointId":{"type":"string"}, "ClientVpnEndpointIdList":{ "type":"list", @@ -7620,7 +7809,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

An identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the snapshot is being copied to.

Amazon EBS does not support asymmetric CMKs.

", + "documentation":"

The identifier of the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating encrypted volumes. If this parameter is not specified, your AWS managed CMK for EBS is used. If you specify a CMK, you must also set the encrypted state to true.

You can specify a CMK using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an identifier that is not valid, the action can appear to complete, but eventually fails.

The specified CMK must exist in the destination Region.

Amazon EBS does not support asymmetric CMKs.
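To illustrate how this parameter is supplied through the generated client, here is a sketch of an encrypted AMI copy using a key alias; the AMI ID, Regions, and alias are placeholders, and the client must be built for the destination Region.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.CopyImageRequest;
import software.amazon.awssdk.services.ec2.model.CopyImageResponse;

public class CopyImageExample {
    public static void main(String[] args) {
        // The copy is created in the Region that the client targets (the destination Region).
        try (Ec2Client ec2 = Ec2Client.builder().region(Region.US_EAST_1).build()) {
            CopyImageResponse copy = ec2.copyImage(CopyImageRequest.builder()
                    .sourceImageId("ami-0123456789abcdef0")   // placeholder AMI in the source Region
                    .sourceRegion("us-west-2")                // placeholder source Region
                    .name("my-image-copy")
                    .encrypted(true)
                    .kmsKeyId("alias/ExampleAlias")           // any of the key formats listed above
                    .build());
            System.out.println("New image: " + copy.imageId());
        }
    }
}
```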

", "locationName":"kmsKeyId" }, "Name":{ @@ -7672,17 +7861,17 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Otherwise, omit this parameter. Encrypted snapshots are encrypted, even if you omit this parameter and encryption by default is not enabled. You cannot set this parameter to false. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Otherwise, omit this parameter. Encrypted snapshots are encrypted, even if you omit this parameter and encryption by default is not enabled. You cannot set this parameter to false. For more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"encrypted" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", + "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", "locationName":"kmsKeyId" }, "PresignedUrl":{ "shape":"String", - "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query Requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", + "documentation":"

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", "locationName":"presignedUrl" }, "SourceRegion":{ @@ -7940,6 +8129,14 @@ "VpcId":{ "shape":"VpcId", "documentation":"

The ID of the VPC to associate with the Client VPN endpoint. If no security group IDs are specified in the request, the default security group for the VPC is applied.

" + }, + "SelfServicePortal":{ + "shape":"SelfServicePortal", + "documentation":"

Specify whether to enable the self-service portal for the Client VPN endpoint.

Default Value: enabled

" + }, + "ClientConnectOptions":{ + "shape":"ClientConnectOptions", + "documentation":"

The options for managing connection authorization for new client connections.

" } } }, @@ -8284,7 +8481,7 @@ }, "Type":{ "shape":"FleetType", - "documentation":"

The type of the request. By default, the EC2 Fleet places an asynchronous request for your desired capacity, and maintains it by replenishing interrupted Spot Instances (maintain). A value of instant places a synchronous one-time request, and returns errors for any instances that could not be launched. A value of request places an asynchronous one-time request without maintaining capacity or submitting requests in alternative capacity pools if capacity is unavailable. For more information, see EC2 Fleet Request Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The type of request. The default value is maintain.

  • maintain - The EC2 Fleet places an asynchronous request for your desired capacity, and continues to maintain your desired Spot capacity by replenishing interrupted Spot Instances.

  • request - The EC2 Fleet places an asynchronous one-time request for your desired capacity, but does not submit Spot requests in alternative capacity pools if Spot capacity is unavailable, and does not maintain Spot capacity if Spot Instances are interrupted.

  • instant - The EC2 Fleet places a synchronous one-time request for your desired capacity, and returns errors for any instances that could not be launched.

For more information, see EC2 Fleet request types in the Amazon Elastic Compute Cloud User Guide.

" }, "ValidFrom":{ "shape":"DateTime", @@ -8300,7 +8497,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging Your Resources.

", + "documentation":"

The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging your resources.
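A hedged sketch of a CreateFleet call that uses the instant request type and tags the fleet request itself (ResourceType fleet), using the AWS SDK for Java v2; the launch template ID and tag values are placeholders:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.CreateFleetRequest;
import software.amazon.awssdk.services.ec2.model.CreateFleetResponse;
import software.amazon.awssdk.services.ec2.model.DefaultTargetCapacityType;
import software.amazon.awssdk.services.ec2.model.FleetLaunchTemplateConfigRequest;
import software.amazon.awssdk.services.ec2.model.FleetLaunchTemplateSpecificationRequest;
import software.amazon.awssdk.services.ec2.model.FleetType;
import software.amazon.awssdk.services.ec2.model.ResourceType;
import software.amazon.awssdk.services.ec2.model.Tag;
import software.amazon.awssdk.services.ec2.model.TagSpecification;
import software.amazon.awssdk.services.ec2.model.TargetCapacitySpecificationRequest;

public class CreateInstantFleetExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            CreateFleetRequest request = CreateFleetRequest.builder()
                    // instant places a synchronous one-time request; maintain is the default.
                    .type(FleetType.INSTANT)
                    .launchTemplateConfigs(FleetLaunchTemplateConfigRequest.builder()
                            .launchTemplateSpecification(FleetLaunchTemplateSpecificationRequest.builder()
                                    .launchTemplateId("lt-0123456789abcdef0")   // placeholder
                                    .version("$Latest")
                                    .build())
                            .build())
                    .targetCapacitySpecification(TargetCapacitySpecificationRequest.builder()
                            .totalTargetCapacity(2)
                            .defaultTargetCapacityType(DefaultTargetCapacityType.SPOT)
                            .build())
                    // ResourceType must be fleet when tagging the fleet request on creation.
                    .tagSpecifications(TagSpecification.builder()
                            .resourceType(ResourceType.FLEET)
                            .tags(Tag.builder().key("Owner").value("TeamA").build())
                            .build())
                    .build();

            CreateFleetResponse response = ec2.createFleet(request);
            System.out.println("Fleet ID: " + response.fleetId());
        }
    }
}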

", "locationName":"TagSpecification" } } @@ -8506,7 +8703,11 @@ }, "CreateInstanceExportTaskRequest":{ "type":"structure", - "required":["InstanceId"], + "required":[ + "ExportToS3Task", + "InstanceId", + "TargetEnvironment" + ], "members":{ "Description":{ "shape":"String", @@ -9167,6 +9368,10 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.

" + }, "EgressOnlyInternetGatewayId":{ "shape":"EgressOnlyInternetGatewayId", "documentation":"

[IPv6 traffic only] The ID of an egress-only internet gateway.

", @@ -9371,7 +9576,7 @@ "members":{ "Bucket":{ "shape":"String", - "documentation":"

The Amazon S3 bucket in which to store the Spot Instance data feed.

", + "documentation":"

The name of the Amazon S3 bucket in which to store the Spot Instance data feed. For more information about bucket names, see Rules for bucket naming in the Amazon S3 Developer Guide.

", "locationName":"bucket" }, "DryRun":{ @@ -9381,7 +9586,7 @@ }, "Prefix":{ "shape":"String", - "documentation":"

A prefix for the data feed file names.

", + "documentation":"

The prefix for the data feed file names.

", "locationName":"prefix" } }, @@ -9960,7 +10165,11 @@ }, "Ipv6Support":{ "shape":"Ipv6SupportValue", - "documentation":"

Enable or disable IPv6 support. The default is enable.

" + "documentation":"

Enable or disable IPv6 support.

" + }, + "ApplianceModeSupport":{ + "shape":"ApplianceModeSupportValue", + "documentation":"

Enable or disable support for appliance mode. If enabled, a traffic flow between a source and destination uses the same Availability Zone for the VPC attachment for the lifetime of that flow. The default is disable.

" } }, "documentation":"

Describes the options for a VPC attachment.

" @@ -10022,16 +10231,16 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Specifies whether the volume should be encrypted. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Encryption by default in the Amazon Elastic Compute Cloud User Guide.

Encrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types.

", + "documentation":"

Indicates whether the volume should be encrypted. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Encryption by default in the Amazon Elastic Compute Cloud User Guide.

Encrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types.

", "locationName":"encrypted" }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) to provision for an io1 or io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes.

" + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" }, "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

" + "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

" }, "OutpostArn":{ "shape":"String", @@ -10039,7 +10248,7 @@ }, "Size":{ "shape":"Integer", - "documentation":"

The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size.

Constraints: 1-16,384 for gp2, 4-16,384 for io1 and io2, 500-16,384 for st1, 500-16,384 for sc1, and 1-1,024 for standard. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

" + "documentation":"

The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size.

The following are the supported volume sizes for each volume type:

  • gp2 and gp3: 1-16,384

  • io1 and io2: 4-16,384

  • st1 and sc1: 125-16,384

  • standard: 1-1,024

" }, "SnapshotId":{ "shape":"SnapshotId", @@ -10047,7 +10256,7 @@ }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type. This can be gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

Default: gp2

" + "documentation":"

The volume type. This parameter can be one of the following values:

  • General Purpose SSD: gp2 | gp3

  • Provisioned IOPS SSD: io1 | io2

  • Throughput Optimized HDD: st1

  • Cold HDD: sc1

  • Magnetic: standard

For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

Default: gp2

" }, "DryRun":{ "shape":"Boolean", @@ -10061,7 +10270,11 @@ }, "MultiAttachEnabled":{ "shape":"Boolean", - "documentation":"

Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Nitro-based instances in the same Availability Zone. For more information, see Amazon EBS Multi-Attach in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon Elastic Compute Cloud User Guide.

" + }, + "Throughput":{ + "shape":"Integer", + "documentation":"

The throughput to provision for a volume, with a maximum of 1,000 MiB/s.

This parameter is valid only for gp3 volumes.

Valid Range: Minimum value of 125. Maximum value of 1000.
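A minimal sketch of a CreateVolume call that provisions a gp3 volume with explicit Iops and Throughput values using the AWS SDK for Java v2; the Availability Zone and sizes are placeholders chosen from the ranges above, and VolumeType.GP3 is assumed to be present in this SDK version:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.CreateVolumeRequest;
import software.amazon.awssdk.services.ec2.model.CreateVolumeResponse;
import software.amazon.awssdk.services.ec2.model.VolumeType;

public class CreateGp3VolumeExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            CreateVolumeRequest request = CreateVolumeRequest.builder()
                    .availabilityZone("us-east-1a")   // placeholder
                    .volumeType(VolumeType.GP3)
                    .size(100)                        // GiB; 1-16,384 for gp3
                    .iops(3000)                       // gp3 default; 3,000-16,000 allowed
                    .throughput(125)                  // MiB/s; valid range 125-1,000, gp3 only
                    .encrypted(true)                  // uses the default CMK unless kmsKeyId is set
                    .build();

            CreateVolumeResponse volume = ec2.createVolume(request);
            System.out.println("Volume ID: " + volume.volumeId());
        }
    }
}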

" } } }, @@ -10138,7 +10351,7 @@ }, "PolicyDocument":{ "shape":"String", - "documentation":"

A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

" + "documentation":"

(Interface and gateway endpoints) A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service.

" }, "RouteTableIds":{ "shape":"VpcEndpointRouteTableIdList", @@ -10147,7 +10360,7 @@ }, "SubnetIds":{ "shape":"VpcEndpointSubnetIdList", - "documentation":"

(Interface endpoint) The ID of one or more subnets in which to create an endpoint network interface.

", + "documentation":"

(Interface and Gateway Load Balancer endpoints) The ID of one or more subnets in which to create an endpoint network interface. For a Gateway Load Balancer endpoint, you can specify one subnet only.

", "locationName":"SubnetId" }, "SecurityGroupIds":{ @@ -10189,7 +10402,6 @@ }, "CreateVpcEndpointServiceConfigurationRequest":{ "type":"structure", - "required":["NetworkLoadBalancerArns"], "members":{ "DryRun":{ "shape":"Boolean", @@ -10201,13 +10413,18 @@ }, "PrivateDnsName":{ "shape":"String", - "documentation":"

The private DNS name to assign to the VPC endpoint service.

" + "documentation":"

(Interface endpoint configuration) The private DNS name to assign to the VPC endpoint service.

" }, "NetworkLoadBalancerArns":{ "shape":"ValueStringList", "documentation":"

The Amazon Resource Names (ARNs) of one or more Network Load Balancers for your service.

", "locationName":"NetworkLoadBalancerArn" }, + "GatewayLoadBalancerArns":{ + "shape":"ValueStringList", + "documentation":"

The Amazon Resource Names (ARNs) of one or more Gateway Load Balancers.

", + "locationName":"GatewayLoadBalancerArn" + }, "ClientToken":{ "shape":"String", "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

" @@ -10548,6 +10765,7 @@ "DateTime":{"type":"timestamp"}, "DedicatedHostFlag":{"type":"boolean"}, "DedicatedHostId":{"type":"string"}, + "DefaultNetworkCardIndex":{"type":"integer"}, "DefaultRouteTableAssociationValue":{ "type":"string", "enum":[ @@ -10802,7 +11020,7 @@ }, "TerminateInstances":{ "shape":"Boolean", - "documentation":"

Indicates whether to terminate instances for an EC2 Fleet if it is deleted successfully.

" + "documentation":"

Indicates whether to terminate the instances when the EC2 Fleet is deleted. The default is to terminate the instances.

To let the instances continue to run after the EC2 Fleet is deleted, specify NoTerminateInstances. Supported only for fleets of type maintain and request.

For instant fleets, you cannot specify NoTerminateInstances. A deleted instant fleet with running instances is not supported.
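A short sketch of a DeleteFleets call that keeps the instances running (the NoTerminateInstances behavior described above), using the AWS SDK for Java v2; the fleet ID is a placeholder:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DeleteFleetsRequest;
import software.amazon.awssdk.services.ec2.model.DeleteFleetsResponse;

public class DeleteFleetExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            DeleteFleetsResponse response = ec2.deleteFleets(DeleteFleetsRequest.builder()
                    .fleetIds("fleet-12345678-1234-1234-1234-123456789012")   // placeholder
                    // false keeps the instances running after the fleet is deleted.
                    .terminateInstances(false)
                    .build());

            response.successfulFleetDeletions()
                    .forEach(d -> System.out.println("Deleted: " + d.fleetId()));
        }
    }
}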

" } } }, @@ -13268,7 +13486,7 @@ }, "Filter":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).

  • log-destination-type - The type of destination to which the flow log publishes data. Possible destination types include cloud-watch-logs and S3.

  • flow-log-id - The ID of the flow log.

  • log-group-name - The name of the log group.

  • resource-id - The ID of the VPC, subnet, or network interface.

  • traffic-type - The type of traffic (ACCEPT | REJECT | ALL).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

" + "documentation":"

One or more filters.

  • deliver-log-status - The status of the logs delivery (SUCCESS | FAILED).

  • log-destination-type - The type of destination to which the flow log publishes data. Possible destination types include cloud-watch-logs and s3.

  • flow-log-id - The ID of the flow log.

  • log-group-name - The name of the log group.

  • resource-id - The ID of the VPC, subnet, or network interface.

  • traffic-type - The type of traffic (ACCEPT | REJECT | ALL).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

" }, "FlowLogIds":{ "shape":"FlowLogIdList", @@ -13892,7 +14110,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • location - This depends on the location type. For example, if the location type is region (default), the location is the Region code (for example, us-east-2.)

  • instance-type - The instance type.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • location - This depends on the location type. For example, if the location type is region (default), the location is the Region code (for example, us-east-2).

  • instance-type - The instance type. For example, c5.2xlarge.

", "locationName":"Filter" }, "MaxResults":{ @@ -13934,7 +14152,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • auto-recovery-supported - Indicates whether auto recovery is supported. (true | false)

  • bare-metal - Indicates whether it is a bare metal instance type. (true | false)

  • burstable-performance-supported - Indicates whether it is a burstable performance instance type. (true | false)

  • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family. (true | false)

  • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

  • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MBps.

  • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

  • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

  • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MBps.

  • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

  • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized. (supported | unsupported | default)

  • ebs-info.encryption-support - Indicates whether EBS encryption is supported. (supported | unsupported)

  • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported or required. (required | supported | unsupported)

  • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier. (true | false)

  • hibernation-supported - Indicates whether On-Demand hibernation is supported. (true | false)

  • hypervisor - The hypervisor used. (nitro | xen)

  • instance-storage-info.disk.count - The number of local disks.

  • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

  • instance-storage-info.disk.type - The storage technology for the local instance storage disks. (hdd | ssd)

  • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

  • instance-storage-supported - Indicates whether the instance type has local instance storage. (true | false)

  • memory-info.size-in-mib - The memory size.

  • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required. (required | supported | unsupported)

  • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA). (true | false)

  • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

  • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

  • network-info.ipv6-supported - Indicates whether the instance type supports IPv6. (true | false)

  • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

  • network-info.network-performance - Describes the network performance.

  • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

  • vcpu-info.default-cores - The default number of cores for the instance type.

  • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

  • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • auto-recovery-supported - Indicates whether auto recovery is supported (true | false).

  • bare-metal - Indicates whether it is a bare metal instance type (true | false).

  • burstable-performance-supported - Indicates whether it is a burstable performance instance type (true | false).

  • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false).

  • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

  • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

  • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s.

  • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

  • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

  • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s.

  • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default).

  • ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported).

  • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported).

  • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false).

  • hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

  • hypervisor - The hypervisor (nitro | xen).

  • instance-storage-info.disk.count - The number of local disks.

  • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

  • instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd).

  • instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported).

  • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

  • instance-storage-supported - Indicates whether the instance type has local instance storage (true | false).

  • instance-type - The instance type (for example, c5.2xlarge or c5*).

  • memory-info.size-in-mib - The memory size.

  • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false).

  • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported).

  • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

  • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

  • network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

  • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

  • network-info.network-performance - The network performance (for example, \"25 Gigabit\").

  • processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64).

  • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

  • supported-root-device-type - The root device type (ebs | instance-store).

  • supported-usage-class - The usage class (on-demand | spot).

  • supported-virtualization-type - The virtualization type (hvm | paravirtual).

  • vcpu-info.default-cores - The default number of cores for the instance type.

  • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

  • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

  • vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

  • vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\".
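A brief sketch of how a few of these filters might be combined in a DescribeInstanceTypes call with the AWS SDK for Java v2; the filter values are illustrative only:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeInstanceTypesRequest;
import software.amazon.awssdk.services.ec2.model.DescribeInstanceTypesResponse;
import software.amazon.awssdk.services.ec2.model.Filter;

public class DescribeInstanceTypesExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            DescribeInstanceTypesRequest request = DescribeInstanceTypesRequest.builder()
                    .filters(
                            Filter.builder().name("instance-storage-info.nvme-support").values("required").build(),
                            Filter.builder().name("current-generation").values("true").build())
                    .build();

            // The paginator follows NextToken automatically.
            for (DescribeInstanceTypesResponse page : ec2.describeInstanceTypesPaginator(request)) {
                page.instanceTypes().forEach(it -> System.out.println(it.instanceType()));
            }
        }
    }
}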

", "locationName":"Filter" }, "MaxResults":{ @@ -15508,7 +15726,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of snapshot results returned by DescribeSnapshots in paginated output. When this parameter is used, DescribeSnapshots only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeSnapshots request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeSnapshots returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.

" + "documentation":"

The maximum number of snapshot results returned by DescribeSnapshots in paginated output. When this parameter is used, DescribeSnapshots only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another DescribeSnapshots request with the returned NextToken value. This value can be between 5 and 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results are returned. If this parameter is not used, then DescribeSnapshots returns all results. You cannot specify this parameter and the snapshot IDs parameter in the same request.
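A small sketch showing MaxResults together with the SDK's paginator, which resubmits the request with the returned NextToken until all pages are read, using the AWS SDK for Java v2:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeSnapshotsRequest;
import software.amazon.awssdk.services.ec2.model.DescribeSnapshotsResponse;

public class DescribeSnapshotsExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            DescribeSnapshotsRequest request = DescribeSnapshotsRequest.builder()
                    .ownerIds("self")
                    .maxResults(1000)   // per-page cap; values above 1,000 return only 1,000 results per page
                    .build();

            for (DescribeSnapshotsResponse page : ec2.describeSnapshotsPaginator(request)) {
                page.snapshots().forEach(s -> System.out.println(s.snapshotId()));
            }
        }
    }
}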

" }, "NextToken":{ "shape":"String", @@ -15794,7 +16012,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • availability-zone - The Availability Zone for which prices should be returned.

  • instance-type - The type of instance (for example, m3.medium).

  • product-description - The product description for the Spot price (Linux/UNIX | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).

  • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).

  • timestamp - The time stamp of the Spot price history, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater than or less than comparison is not supported.

", + "documentation":"

One or more filters.

  • availability-zone - The Availability Zone for which prices should be returned.

  • instance-type - The type of instance (for example, m3.medium).

  • product-description - The product description for the Spot price (Linux/UNIX | Red Hat Enterprise Linux | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).

  • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).

  • timestamp - The time stamp of the Spot price history, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater than or less than comparison is not supported.

", "locationName":"Filter" }, "AvailabilityZone":{ @@ -16208,7 +16426,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. The possible values are:

  • transit-gateway-attachment-id - The ID of the transit gateway attachment.

  • local-owner-id - The ID of your AWS account.

  • remote-owner-id - The ID of the AWS account in the remote Region that owns the transit gateway.

  • state - The state of the peering attachment. Valid values are available | deleted | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting).

  • transit-gateway-id - The ID of the transit gateway.

", + "documentation":"

One or more filters. The possible values are:

  • transit-gateway-attachment-id - The ID of the transit gateway attachment.

  • local-owner-id - The ID of your AWS account.

  • remote-owner-id - The ID of the AWS account in the remote Region that owns the transit gateway.

  • state - The state of the peering attachment. Valid values are available | deleted | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance | pending | rollingBack | rejected | rejecting.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

  • transit-gateway-id - The ID of the transit gateway.

", "locationName":"Filter" }, "MaxResults":{ @@ -16415,7 +16633,7 @@ }, "MaxResults":{ "shape":"Integer", - "documentation":"

The maximum number of volume results returned by DescribeVolumeStatus in paginated output. When this parameter is used, the request only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1000; if MaxResults is given a value larger than 1000, only 1000 results are returned. If this parameter is not used, then DescribeVolumeStatus returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

" + "documentation":"

The maximum number of volume results returned by DescribeVolumeStatus in paginated output. When this parameter is used, the request only returns MaxResults results in a single page along with a NextToken response element. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. This value can be between 5 and 1,000; if MaxResults is given a value larger than 1,000, only 1,000 results are returned. If this parameter is not used, then DescribeVolumeStatus returns all results. You cannot specify this parameter and the volume IDs parameter in the same request.

" }, "NextToken":{ "shape":"String", @@ -16495,7 +16713,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • attachment.attach-time - The time stamp when the attachment initiated.

  • attachment.delete-on-termination - Whether the volume is deleted on instance termination.

  • attachment.device - The device name specified in the block device mapping (for example, /dev/sda1).

  • attachment.instance-id - The ID of the instance the volume is attached to.

  • attachment.status - The attachment state (attaching | attached | detaching).

  • availability-zone - The Availability Zone in which the volume was created.

  • create-time - The time stamp when the volume was created.

  • encrypted - Indicates whether the volume is encrypted (true | false)

  • multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach (true | false)

  • fast-restored - Indicates whether the volume was created from a snapshot that is enabled for fast snapshot restore (true | false).

  • size - The size of the volume, in GiB.

  • snapshot-id - The snapshot from which the volume was created.

  • status - The state of the volume (creating | available | in-use | deleting | deleted | error).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • volume-id - The volume ID.

  • volume-type - The Amazon EBS volume type. This can be gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

", + "documentation":"

The filters.

  • attachment.attach-time - The time stamp when the attachment initiated.

  • attachment.delete-on-termination - Whether the volume is deleted on instance termination.

  • attachment.device - The device name specified in the block device mapping (for example, /dev/sda1).

  • attachment.instance-id - The ID of the instance the volume is attached to.

  • attachment.status - The attachment state (attaching | attached | detaching).

  • availability-zone - The Availability Zone in which the volume was created.

  • create-time - The time stamp when the volume was created.

  • encrypted - Indicates whether the volume is encrypted (true | false)

  • multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach (true | false)

  • fast-restored - Indicates whether the volume was created from a snapshot that is enabled for fast snapshot restore (true | false).

  • size - The size of the volume, in GiB.

  • snapshot-id - The snapshot from which the volume was created.

  • status - The state of the volume (creating | available | in-use | deleting | deleted | error).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • volume-id - The volume ID.

  • volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1 | standard).

", "locationName":"Filter" }, "VolumeIds":{ @@ -16876,7 +17094,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • service-name - The name of the service.

  • vpc-id - The ID of the VPC in which the endpoint resides.

  • vpc-endpoint-id - The ID of the endpoint.

  • vpc-endpoint-state - The state of the endpoint (pendingAcceptance | pending | available | deleting | deleted | rejected | failed).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", + "documentation":"

One or more filters.

  • service-name - The name of the service.

  • vpc-id - The ID of the VPC in which the endpoint resides.

  • vpc-endpoint-id - The ID of the endpoint.

  • vpc-endpoint-state - The state of the endpoint (pendingAcceptance | pending | available | deleting | deleted | rejected | failed).

  • vpc-endpoint-type - The type of VPC endpoint (Interface | Gateway | GatewayLoadBalancer).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "MaxResults":{ @@ -17634,6 +17852,33 @@ } } }, + "DisassociateEnclaveCertificateIamRoleRequest":{ + "type":"structure", + "members":{ + "CertificateArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the ACM certificate from which to disassociate the IAM role.

" + }, + "RoleArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the IAM role to disassociate.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DisassociateEnclaveCertificateIamRoleResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "documentation":"

Returns true if the request succeeds; otherwise, it returns an error.

", + "locationName":"return" + } + } + }, "DisassociateIamInstanceProfileRequest":{ "type":"structure", "required":["AssociationId"], @@ -17994,7 +18239,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1 and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS for io1 and io2 volumes in most Regions. Maximum io1 and io2 IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 and io2 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

", "locationName":"iops" }, "SnapshotId":{ @@ -18004,18 +18249,23 @@ }, "VolumeSize":{ "shape":"Integer", - "documentation":"

The size of the volume, in GiB.

Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1 and io2), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

", + "documentation":"

The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size.

The following are the supported volume sizes for each volume type:

  • gp2 and gp3: 1-16,384

  • io1 and io2: 4-16,384

  • st1: 500-16,384

  • sc1: 500-16,384

  • standard: 1-1,024

", "locationName":"volumeSize" }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type. If you set the type to io1 or io2, you must also specify the Iops parameter. If you set the type to gp2, st1, sc1, or standard, you must omit the Iops parameter.

Default: gp2

", + "documentation":"

The volume type. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide. If the volume type is io1 or io2, you must specify the IOPS that the volume supports.

", "locationName":"volumeType" }, "KmsKeyId":{ "shape":"String", "documentation":"

Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.

This parameter is only supported on BlockDeviceMapping objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.

" }, + "Throughput":{ + "shape":"Integer", + "documentation":"

The throughput that the volume supports, in MiB/s.

This parameter is valid only for gp3 volumes.

Valid Range: Minimum value of 125. Maximum value of 1000.

", + "locationName":"throughput" + }, "Encrypted":{ "shape":"Boolean", "documentation":"

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types.

This parameter is not returned by .

", @@ -18036,7 +18286,7 @@ "members":{ "EbsOptimizedSupport":{ "shape":"EbsOptimizedSupport", - "documentation":"

Indicates that the instance type is Amazon EBS-optimized. For more information, see Amazon EBS-Optimized Instances in Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

Indicates whether the instance type is Amazon EBS-optimized. For more information, see Amazon EBS-Optimized Instances in Amazon EC2 User Guide for Linux Instances.

", "locationName":"ebsOptimizedSupport" }, "EncryptionSupport":{ @@ -18117,7 +18367,7 @@ }, "BaselineThroughputInMBps":{ "shape":"BaselineThroughputInMBps", - "documentation":"

The baseline throughput performance for an EBS-optimized instance type, in MBps.

", + "documentation":"

The baseline throughput performance for an EBS-optimized instance type, in MB/s.

", "locationName":"baselineThroughputInMBps" }, "BaselineIops":{ @@ -18132,7 +18382,7 @@ }, "MaximumThroughputInMBps":{ "shape":"MaximumThroughputInMBps", - "documentation":"

The maximum throughput performance for an EBS-optimized instance type, in MBps.

", + "documentation":"

The maximum throughput performance for an EBS-optimized instance type, in MB/s.

", "locationName":"maximumThroughputInMBps" }, "MaximumIops":{ @@ -18711,6 +18961,27 @@ } } }, + "EnclaveOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; otherwise, it is not enabled for AWS Nitro Enclaves.

", + "locationName":"enabled" + } + }, + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves.

" + }, + "EnclaveOptionsRequest":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

To enable the instance for AWS Nitro Enclaves, set this parameter to true.

" + } + }, + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.
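A hedged sketch of a RunInstances call that sets the enclave option described above, using the AWS SDK for Java v2; the AMI ID is a placeholder and the instance type must be one that supports AWS Nitro Enclaves:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.EnclaveOptionsRequest;
import software.amazon.awssdk.services.ec2.model.InstanceType;
import software.amazon.awssdk.services.ec2.model.RunInstancesRequest;
import software.amazon.awssdk.services.ec2.model.RunInstancesResponse;

public class NitroEnclaveInstanceExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            RunInstancesRequest request = RunInstancesRequest.builder()
                    .imageId("ami-0123456789abcdef0")       // placeholder AMI
                    .instanceType(InstanceType.M5_XLARGE)   // must be an enclave-capable instance type
                    .minCount(1)
                    .maxCount(1)
                    // Enable AWS Nitro Enclaves for the instance.
                    .enclaveOptions(EnclaveOptionsRequest.builder().enabled(true).build())
                    .build();

            RunInstancesResponse response = ec2.runInstances(request);
            System.out.println("Launched: " + response.instances().get(0).instanceId());
        }
    }
}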

" + }, "EndDateType":{ "type":"string", "enum":[ @@ -18725,6 +18996,14 @@ "locationName":"item" } }, + "EphemeralNvmeSupport":{ + "type":"string", + "enum":[ + "unsupported", + "supported", + "required" + ] + }, "ErrorSet":{ "type":"list", "member":{ @@ -19232,9 +19511,14 @@ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the IAM SAML identity provider.

", "locationName":"samlProviderArn" + }, + "SelfServiceSamlProviderArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM SAML identity provider for the self-service portal.

", + "locationName":"selfServiceSamlProviderArn" } }, - "documentation":"

Describes the IAM SAML identity provider used for federated authentication.

" + "documentation":"

Describes the IAM SAML identity providers used for federated authentication.

" }, "FederatedAuthenticationRequest":{ "type":"structure", @@ -19242,6 +19526,10 @@ "SAMLProviderArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the IAM SAML identity provider.

" + }, + "SelfServiceSAMLProviderArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM SAML identity provider for the self-service portal.

" } }, "documentation":"

The IAM SAML identity provider used for federated authentication.

" @@ -19436,7 +19724,8 @@ "shape":"FleetLaunchTemplateConfigRequest", "locationName":"item" }, - "max":50 + "max":50, + "min":0 }, "FleetLaunchTemplateConfigRequest":{ "type":"structure", @@ -19506,7 +19795,8 @@ "shape":"FleetLaunchTemplateOverridesRequest", "locationName":"item" }, - "max":50 + "max":50, + "min":0 }, "FleetLaunchTemplateOverridesRequest":{ "type":"structure", @@ -19588,6 +19878,10 @@ "prioritized" ] }, + "FleetReplacementStrategy":{ + "type":"string", + "enum":["launch"] + }, "FleetSet":{ "type":"list", "member":{ @@ -19595,6 +19889,48 @@ "locationName":"item" } }, + "FleetSpotCapacityRebalance":{ + "type":"structure", + "members":{ + "ReplacementStrategy":{ + "shape":"FleetReplacementStrategy", + "documentation":"

To allow EC2 Fleet to launch a replacement Spot Instance when an instance rebalance notification is emitted for an existing Spot Instance in the fleet, specify launch. Only available for fleets of type maintain.

When a replacement instance is launched, the instance marked for rebalance is not automatically terminated. You can terminate it, or you can leave it running. You are charged for both instances while they are running.

", + "locationName":"replacementStrategy" + } + }, + "documentation":"

The strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted.

" + }, + "FleetSpotCapacityRebalanceRequest":{ + "type":"structure", + "members":{ + "ReplacementStrategy":{ + "shape":"FleetReplacementStrategy", + "documentation":"

The replacement strategy to use. Only available for fleets of type maintain.

To allow EC2 Fleet to launch a replacement Spot Instance when an instance rebalance notification is emitted for an existing Spot Instance in the fleet, specify launch. You must specify a value, otherwise you get an error.

When a replacement instance is launched, the instance marked for rebalance is not automatically terminated. You can terminate it, or you can leave it running. You are charged for all instances while they are running.

" + } + }, + "documentation":"

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon Elastic Compute Cloud User Guide.

" + }, + "FleetSpotMaintenanceStrategies":{ + "type":"structure", + "members":{ + "CapacityRebalance":{ + "shape":"FleetSpotCapacityRebalance", + "documentation":"

The strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted.

", + "locationName":"capacityRebalance" + } + }, + "documentation":"

The strategies for managing your Spot Instances that are at an elevated risk of being interrupted.

" + }, + "FleetSpotMaintenanceStrategiesRequest":{ + "type":"structure", + "members":{ + "CapacityRebalance":{ + "shape":"FleetSpotCapacityRebalanceRequest", + "documentation":"

The strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted.

" + } + }, + "documentation":"

The strategies for managing your Spot Instances that are at an elevated risk of being interrupted.
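A hedged sketch of how the capacity rebalance strategy might be expressed with the AWS SDK for Java v2; the maintenanceStrategies accessor on SpotOptionsRequest is an assumption here, since the member that carries these strategies is not shown in this excerpt:

import software.amazon.awssdk.services.ec2.model.FleetReplacementStrategy;
import software.amazon.awssdk.services.ec2.model.FleetSpotCapacityRebalanceRequest;
import software.amazon.awssdk.services.ec2.model.FleetSpotMaintenanceStrategiesRequest;
import software.amazon.awssdk.services.ec2.model.SpotOptionsRequest;

public class CapacityRebalanceOptionsExample {
    public static void main(String[] args) {
        // Spot options for a CreateFleet call of type maintain; attach via
        // CreateFleetRequest.builder().spotOptions(spotOptions) alongside the other fleet settings.
        SpotOptionsRequest spotOptions = SpotOptionsRequest.builder()
                // maintenanceStrategies is assumed to be the SpotOptionsRequest member for these strategies.
                .maintenanceStrategies(FleetSpotMaintenanceStrategiesRequest.builder()
                        .capacityRebalance(FleetSpotCapacityRebalanceRequest.builder()
                                // launch a replacement when a rebalance notification is emitted;
                                // the notified instance keeps running until you terminate it.
                                .replacementStrategy(FleetReplacementStrategy.LAUNCH)
                                .build())
                        .build())
                .build();

        System.out.println(spotOptions);
    }
}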

" + }, "FleetStateCode":{ "type":"string", "enum":[ @@ -19762,7 +20098,7 @@ "members":{ "SizeInMiB":{ "shape":"FpgaDeviceMemorySize", - "documentation":"

The size (in MiB) for the memory available to the FPGA accelerator.

", + "documentation":"

The size of the memory available to the FPGA accelerator, in MiB.

", "locationName":"sizeInMiB" } }, @@ -19952,6 +20288,29 @@ "type":"string", "enum":["ipsec.1"] }, + "GetAssociatedEnclaveCertificateIamRolesRequest":{ + "type":"structure", + "members":{ + "CertificateArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the ACM certificate for which to view the associated IAM roles, encryption keys, and Amazon S3 object information.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "GetAssociatedEnclaveCertificateIamRolesResult":{ + "type":"structure", + "members":{ + "AssociatedRoles":{ + "shape":"AssociatedRolesList", + "documentation":"

Information about the associated IAM roles.
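A short, assumed sketch of calling this operation with the AWS SDK for Java v2; the certificate ARN is a placeholder:

import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.GetAssociatedEnclaveCertificateIamRolesRequest;

public class ListEnclaveCertificateRolesExample {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.create()) {
            ec2.getAssociatedEnclaveCertificateIamRoles(
                            GetAssociatedEnclaveCertificateIamRolesRequest.builder()
                                    .certificateArn("arn:aws:acm:us-east-1:123456789012:certificate/example")  // placeholder
                                    .build())
                    .associatedRoles()
                    .forEach(role -> System.out.println(role));
        }
    }
}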

", + "locationName":"associatedRoleSet" + } + } + }, "GetAssociatedIpv6PoolCidrsRequest":{ "type":"structure", "required":["PoolId"], @@ -20784,7 +21143,7 @@ "members":{ "SizeInMiB":{ "shape":"GpuDeviceMemorySize", - "documentation":"

The size (in MiB) for the memory available to the GPU accelerator.

", + "documentation":"

The size of the memory available to the GPU accelerator, in MiB.

", "locationName":"sizeInMiB" } }, @@ -20802,7 +21161,7 @@ }, "TotalGpuMemoryInMiB":{ "shape":"totalGpuMemory", - "documentation":"

The total size of the memory for the GPU accelerators for the instance type.

", + "documentation":"

The total size of the memory for the GPU accelerators for the instance type, in MiB.

", "locationName":"totalGpuMemoryInMiB" } }, @@ -22683,6 +23042,11 @@ "shape":"InstanceMetadataOptionsResponse", "documentation":"

The metadata options for the instance.

", "locationName":"metadataOptions" + }, + "EnclaveOptions":{ + "shape":"EnclaveOptions", + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves.

", + "locationName":"enclaveOptions" } }, "documentation":"

Describes an instance.

" @@ -22710,6 +23074,11 @@ "documentation":"

Indicates whether enhanced networking with ENA is enabled.

", "locationName":"enaSupport" }, + "EnclaveOptions":{ + "shape":"EnclaveOptions", + "documentation":"

To enable the instance for AWS Nitro Enclaves, set this parameter to true; otherwise, set it to false.

", + "locationName":"enclaveOptions" + }, "EbsOptimized":{ "shape":"AttributeBooleanValue", "documentation":"

Indicates whether the instance is optimized for Amazon EBS I/O.

", @@ -22784,7 +23153,8 @@ "groupSet", "ebsOptimized", "sriovNetSupport", - "enaSupport" + "enaSupport", + "enclaveOptions" ] }, "InstanceBlockDeviceMapping":{ @@ -23298,6 +23668,11 @@ "shape":"AttachmentStatus", "documentation":"

The attachment state.

", "locationName":"status" + }, + "NetworkCardIndex":{ + "shape":"Integer", + "documentation":"

The index of the network card.

", + "locationName":"networkCardIndex" } }, "documentation":"

Describes a network interface attachment.

" @@ -23380,7 +23755,11 @@ }, "InterfaceType":{ "shape":"String", - "documentation":"

The type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide.

If you are not creating an EFA, specify interface or omit this parameter.

Valid values: interface | efa

" + "documentation":"

The type of network interface.

To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide.

If you are not creating an EFA, specify interface or omit this parameter.

Valid values: interface | efa

" + }, + "NetworkCardIndex":{ + "shape":"Integer", + "documentation":"

The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0.

" } }, "documentation":"

Describes a network interface.

" @@ -23640,8 +24019,13 @@ }, "Disks":{ "shape":"DiskInfoList", - "documentation":"

Array describing the disks that are available for the instance type.

", + "documentation":"

Describes the disks that are available for the instance type.

", "locationName":"disks" + }, + "NvmeSupport":{ + "shape":"EphemeralNvmeSupport", + "documentation":"

Indicates whether non-volatile memory express (NVMe) is supported for instance store.

", + "locationName":"nvmeSupport" } }, "documentation":"

Describes the disks that are available for the instance type.

" @@ -23747,6 +24131,15 @@ "r5a.12xlarge", "r5a.16xlarge", "r5a.24xlarge", + "r5b.large", + "r5b.xlarge", + "r5b.2xlarge", + "r5b.4xlarge", + "r5b.8xlarge", + "r5b.12xlarge", + "r5b.16xlarge", + "r5b.24xlarge", + "r5b.metal", "r5d.large", "r5d.xlarge", "r5d.2xlarge", @@ -23904,10 +24297,21 @@ "p3.8xlarge", "p3.16xlarge", "p3dn.24xlarge", + "p4d.24xlarge", "d2.xlarge", "d2.2xlarge", "d2.4xlarge", "d2.8xlarge", + "d3.xlarge", + "d3.2xlarge", + "d3.4xlarge", + "d3.8xlarge", + "d3en.xlarge", + "d3en.2xlarge", + "d3en.4xlarge", + "d3en.6xlarge", + "d3en.8xlarge", + "d3en.12xlarge", "f1.2xlarge", "f1.4xlarge", "f1.16xlarge", @@ -23945,6 +24349,13 @@ "m5ad.12xlarge", "m5ad.16xlarge", "m5ad.24xlarge", + "m5zn.large", + "m5zn.xlarge", + "m5zn.2xlarge", + "m5zn.3xlarge", + "m5zn.6xlarge", + "m5zn.12xlarge", + "m5zn.metal", "h1.2xlarge", "h1.4xlarge", "h1.8xlarge", @@ -24020,7 +24431,8 @@ "m6gd.4xlarge", "m6gd.8xlarge", "m6gd.12xlarge", - "m6gd.16xlarge" + "m6gd.16xlarge", + "mac1.metal" ] }, "InstanceTypeHypervisor":{ @@ -24040,7 +24452,7 @@ }, "CurrentGeneration":{ "shape":"CurrentGenerationFlag", - "documentation":"

Indicates whether the instance type is a current generation.

", + "documentation":"

Indicates whether the instance type is current generation.

", "locationName":"currentGeneration" }, "FreeTierEligible":{ @@ -24055,7 +24467,7 @@ }, "SupportedRootDeviceTypes":{ "shape":"RootDeviceTypeList", - "documentation":"

Indicates the supported root device types.

", + "documentation":"

The supported root device types.

", "locationName":"supportedRootDeviceTypes" }, "SupportedVirtualizationTypes":{ @@ -24065,12 +24477,12 @@ }, "BareMetal":{ "shape":"BareMetalFlag", - "documentation":"

Indicates whether the instance is bare metal.

", + "documentation":"

Indicates whether the instance is a bare metal instance type.

", "locationName":"bareMetal" }, "Hypervisor":{ "shape":"InstanceTypeHypervisor", - "documentation":"

Indicates the hypervisor used for the instance type.

", + "documentation":"

The hypervisor for the instance type.

", "locationName":"hypervisor" }, "ProcessorInfo":{ @@ -24095,7 +24507,7 @@ }, "InstanceStorageInfo":{ "shape":"InstanceStorageInfo", - "documentation":"

Describes the disks for the instance type.

", + "documentation":"

Describes the instance storage for the instance type.

", "locationName":"instanceStorageInfo" }, "EbsInfo":{ @@ -24951,6 +25363,11 @@ "shape":"VolumeType", "documentation":"

The volume type.

", "locationName":"volumeType" + }, + "Throughput":{ + "shape":"Integer", + "documentation":"

The throughput that the volume supports, in MiB/s.

", + "locationName":"throughput" } }, "documentation":"

Describes a block device for an EBS volume.

" @@ -24968,7 +25385,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) to provision for an io1 or io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes.

" + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

For io1 and io2 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS.

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -24980,11 +25397,15 @@ }, "VolumeSize":{ "shape":"Integer", - "documentation":"

The size of the volume, in GiB.

Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.

" + "documentation":"

The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size.

The following are the supported volume sizes for each volume type:

  • gp2 and gp3: 1-16,384

  • io1 and io2: 4-16,384

  • st1 and sc1: 125-16,384

  • standard: 1-1,024

" }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type.

" + "documentation":"

The volume type. The default is gp2. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

" + }, + "Throughput":{ + "shape":"Integer", + "documentation":"

The throughput to provision for a gp3 volume, with a maximum of 1,000 MiB/s.

Valid Range: Minimum value of 125. Maximum value of 1000.

" } }, "documentation":"

The parameters for a block device for an EBS volume.
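A minimal sketch of how these parameters, including the new Throughput member, surface in the generated SDK v2 builders, assuming this shape is the launch-template EBS block device request; all values below are placeholders:

```java
import software.amazon.awssdk.services.ec2.model.LaunchTemplateBlockDeviceMappingRequest;
import software.amazon.awssdk.services.ec2.model.LaunchTemplateEbsBlockDeviceRequest;
import software.amazon.awssdk.services.ec2.model.VolumeType;

// Request a 100 GiB gp3 root volume with explicitly provisioned IOPS and throughput.
LaunchTemplateBlockDeviceMappingRequest mapping = LaunchTemplateBlockDeviceMappingRequest.builder()
        .deviceName("/dev/xvda")
        .ebs(LaunchTemplateEbsBlockDeviceRequest.builder()
                .volumeType(VolumeType.GP3)  // the default volume type is gp2
                .volumeSize(100)             // GiB; 1-16,384 for gp2 and gp3
                .iops(6000)                  // gp3 range 3,000-16,000; defaults to 3,000 if omitted
                .throughput(250)             // MiB/s; gp3 only, valid range 125-1,000
                .build())
        .build();
```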

" @@ -25038,6 +25459,27 @@ "locationName":"item" } }, + "LaunchTemplateEnclaveOptions":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; otherwise, it is not enabled for AWS Nitro Enclaves.

", + "locationName":"enabled" + } + }, + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves.

" + }, + "LaunchTemplateEnclaveOptionsRequest":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

To enable the instance for AWS Nitro Enclaves, set this parameter to true.

" + } + }, + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.

" + }, "LaunchTemplateErrorCode":{ "type":"string", "enum":[ @@ -25275,6 +25717,11 @@ "shape":"SubnetId", "documentation":"

The ID of the subnet for the network interface.

", "locationName":"subnetId" + }, + "NetworkCardIndex":{ + "shape":"Integer", + "documentation":"

The index of the network card.

", + "locationName":"networkCardIndex" } }, "documentation":"

Describes a network interface.

" @@ -25345,6 +25792,10 @@ "SubnetId":{ "shape":"SubnetId", "documentation":"

The ID of the subnet for the network interface.

" + }, + "NetworkCardIndex":{ + "shape":"Integer", + "documentation":"

The index of the network card. Some instance types support multiple network cards. The primary network interface must be assigned to network card index 0. The default is network card index 0.

" } }, "documentation":"

The parameters for a network interface.

" @@ -26355,6 +26806,7 @@ "MaxResults":{"type":"integer"}, "MaximumBandwidthInMbps":{"type":"integer"}, "MaximumIops":{"type":"integer"}, + "MaximumNetworkCards":{"type":"integer"}, "MaximumThroughputInMBps":{"type":"double"}, "MembershipType":{ "type":"string", @@ -26368,7 +26820,7 @@ "members":{ "SizeInMiB":{ "shape":"MemorySize", - "documentation":"

Size of the memory, in MiB.

", + "documentation":"

The size of the memory, in MiB.

", "locationName":"sizeInMiB" } }, @@ -26494,6 +26946,14 @@ "VpcId":{ "shape":"VpcId", "documentation":"

The ID of the VPC to associate with the Client VPN endpoint.

" + }, + "SelfServicePortal":{ + "shape":"SelfServicePortal", + "documentation":"

Specify whether to enable the self-service portal for the Client VPN endpoint.

" + }, + "ClientConnectOptions":{ + "shape":"ClientConnectOptions", + "documentation":"

The options for managing connection authorization for new client connections.

" } } }, @@ -26544,7 +27004,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

Amazon EBS does not support asymmetric CMKs.

" + "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Key alias. For example, alias/ExampleAlias.

  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

Amazon EBS does not support asymmetric CMKs.

" }, "DryRun":{ "shape":"Boolean", @@ -26564,10 +27024,7 @@ }, "ModifyFleetRequest":{ "type":"structure", - "required":[ - "FleetId", - "TargetCapacitySpecification" - ], + "required":["FleetId"], "members":{ "DryRun":{ "shape":"Boolean", @@ -27620,6 +28077,10 @@ "Ipv6Support":{ "shape":"Ipv6SupportValue", "documentation":"

Enable or disable IPv6 support. The default is enable.

" + }, + "ApplianceModeSupport":{ + "shape":"ApplianceModeSupportValue", + "documentation":"

Enable or disable support for appliance mode. If enabled, a traffic flow between a source and destination uses the same Availability Zone for the VPC attachment for the lifetime of that flow. The default is disable.

" } }, "documentation":"

Describes the options for a VPC attachment.

" @@ -27667,15 +28128,19 @@ }, "Size":{ "shape":"Integer", - "documentation":"

The target size of the volume, in GiB. The target volume size must be greater than or equal to than the existing size of the volume. For information about available EBS volume sizes, see Amazon EBS Volume Types.

Default: If no size is specified, the existing size is retained.

" + "documentation":"

The target size of the volume, in GiB. The target volume size must be greater than or equal to the existing size of the volume.

The following are the supported volume sizes for each volume type:

  • gp2 and gp3: 1-16,384

  • io1 and io2: 4-16,384

  • st1 and sc1: 125-16,384

  • standard: 1-1,024

Default: If no size is specified, the existing size is retained.

" }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The target EBS volume type of the volume.

Default: If no type is specified, the existing type is retained.

" + "documentation":"

The target EBS volume type of the volume. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

Default: If no type is specified, the existing type is retained.

" }, "Iops":{ "shape":"Integer", - "documentation":"

The target IOPS rate of the volume.

This is only valid for Provisioned IOPS SSD (io1 and io2) volumes. For moreinformation, see Provisioned IOPS SSD (io1 and io2) volumes.

Default: If no IOPS value is specified, the existing value is retained.

" + "documentation":"

The target IOPS rate of the volume. This parameter is valid only for gp3, io1, and io2 volumes.

The following are the supported values for each volume type:

  • gp3: 3,000-16,000 IOPS

  • io1: 100-64,000 IOPS

  • io2: 100-64,000 IOPS

Default: If no IOPS value is specified, the existing value is retained.

" + }, + "Throughput":{ + "shape":"Integer", + "documentation":"

The target throughput of the volume, in MiB/s. This parameter is valid only for gp3 volumes. The maximum value is 1,000.

Default: If no throughput value is specified, the existing value is retained.

Valid Range: Minimum value of 125. Maximum value of 1000.
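As an illustration of the new gp3 tuning parameters on ModifyVolume, a minimal SDK v2 sketch; the setter names follow the members of this request, the volume ID is a placeholder, and any parameter left unset keeps its existing value:

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.ModifyVolumeRequest;
import software.amazon.awssdk.services.ec2.model.VolumeType;

Ec2Client ec2 = Ec2Client.create();

// Convert an existing volume to gp3 and provision IOPS and throughput explicitly.
ec2.modifyVolume(ModifyVolumeRequest.builder()
        .volumeId("vol-0123456789abcdef0")  // placeholder volume ID
        .volumeType(VolumeType.GP3)
        .iops(4000)                         // gp3: 3,000-16,000
        .throughput(500)                    // MiB/s; gp3 only, 125-1,000
        .build());
```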

" } } }, @@ -27758,7 +28223,7 @@ }, "PolicyDocument":{ "shape":"String", - "documentation":"

A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format.

" + "documentation":"

(Interface and gateway endpoints) A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format.

" }, "AddRouteTableIds":{ "shape":"VpcEndpointRouteTableIdList", @@ -27772,7 +28237,7 @@ }, "AddSubnetIds":{ "shape":"VpcEndpointSubnetIdList", - "documentation":"

(Interface endpoint) One or more subnet IDs in which to serve the endpoint.

", + "documentation":"

(Interface and Gateway Load Balancer endpoints) One or more subnet IDs in which to serve the endpoint. For a Gateway Load Balancer endpoint, you can specify only one subnet.

", "locationName":"AddSubnetId" }, "RemoveSubnetIds":{ @@ -27821,11 +28286,11 @@ }, "PrivateDnsName":{ "shape":"String", - "documentation":"

The private DNS name to assign to the endpoint service.

" + "documentation":"

(Interface endpoint configuration) The private DNS name to assign to the endpoint service.

" }, "RemovePrivateDnsName":{ "shape":"Boolean", - "documentation":"

Removes the private DNS name of the endpoint service.

" + "documentation":"

(Interface endpoint configuration) Removes the private DNS name of the endpoint service.

" }, "AcceptanceRequired":{ "shape":"Boolean", @@ -27840,6 +28305,16 @@ "shape":"ValueStringList", "documentation":"

The Amazon Resource Names (ARNs) of Network Load Balancers to remove from your service configuration.

", "locationName":"RemoveNetworkLoadBalancerArn" + }, + "AddGatewayLoadBalancerArns":{ + "shape":"ValueStringList", + "documentation":"

The Amazon Resource Names (ARNs) of Gateway Load Balancers to add to your service configuration.

", + "locationName":"AddGatewayLoadBalancerArn" + }, + "RemoveGatewayLoadBalancerArns":{ + "shape":"ValueStringList", + "documentation":"

The Amazon Resource Names (ARNs) of Gateway Load Balancers to remove from your service configuration.
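A minimal sketch of registering a Gateway Load Balancer through the new Add/RemoveGatewayLoadBalancerArns members, assuming this hunk belongs to the ModifyVpcEndpointServiceConfiguration request as the surrounding members suggest; the service ID and ARN are placeholders:

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.ModifyVpcEndpointServiceConfigurationRequest;

Ec2Client ec2 = Ec2Client.create();

// Register a Gateway Load Balancer with an existing endpoint service configuration.
ec2.modifyVpcEndpointServiceConfiguration(ModifyVpcEndpointServiceConfigurationRequest.builder()
        .serviceId("vpce-svc-0123456789abcdef0")  // placeholder service ID
        .addGatewayLoadBalancerArns(
                "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/gwy/example/0123456789abcdef")
        .build());
```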

", + "locationName":"RemoveGatewayLoadBalancerArn" } } }, @@ -28548,12 +29023,41 @@ "locationName":"item" } }, + "NetworkCardIndex":{"type":"integer"}, + "NetworkCardInfo":{ + "type":"structure", + "members":{ + "NetworkCardIndex":{ + "shape":"NetworkCardIndex", + "documentation":"

The index of the network card.

", + "locationName":"networkCardIndex" + }, + "NetworkPerformance":{ + "shape":"NetworkPerformance", + "documentation":"

The network performance of the network card.

", + "locationName":"networkPerformance" + }, + "MaximumNetworkInterfaces":{ + "shape":"MaxNetworkInterfaces", + "documentation":"

The maximum number of network interfaces for the network card.

", + "locationName":"maximumNetworkInterfaces" + } + }, + "documentation":"

Describes the network card support of the instance type.
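A hedged sketch of reading the new per-card limits through DescribeInstanceTypes; the accessor names follow the members above, and the instance type string is only an example of a multi-card type:

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeInstanceTypesRequest;

Ec2Client ec2 = Ec2Client.create();

// List the network cards reported for an instance type.
ec2.describeInstanceTypes(DescribeInstanceTypesRequest.builder()
                .instanceTypesWithStrings("p4d.24xlarge")  // example type
                .build())
        .instanceTypes()
        .forEach(info -> info.networkInfo().networkCards()
                .forEach(card -> System.out.printf("card %d: %s, up to %d ENIs%n",
                        card.networkCardIndex(),
                        card.networkPerformance(),
                        card.maximumNetworkInterfaces())));
```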

" + }, + "NetworkCardInfoList":{ + "type":"list", + "member":{ + "shape":"NetworkCardInfo", + "locationName":"item" + } + }, "NetworkInfo":{ "type":"structure", "members":{ "NetworkPerformance":{ "shape":"NetworkPerformance", - "documentation":"

Describes the network performance.

", + "documentation":"

The network performance.

", "locationName":"networkPerformance" }, "MaximumNetworkInterfaces":{ @@ -28561,6 +29065,21 @@ "documentation":"

The maximum number of network interfaces for the instance type.

", "locationName":"maximumNetworkInterfaces" }, + "MaximumNetworkCards":{ + "shape":"MaximumNetworkCards", + "documentation":"

The maximum number of physical network cards that can be allocated to the instance.

", + "locationName":"maximumNetworkCards" + }, + "DefaultNetworkCardIndex":{ + "shape":"DefaultNetworkCardIndex", + "documentation":"

The index of the default network card, starting at 0.

", + "locationName":"defaultNetworkCardIndex" + }, + "NetworkCards":{ + "shape":"NetworkCardInfoList", + "documentation":"

Describes the network cards for the instance type.

", + "locationName":"networkCards" + }, "Ipv4AddressesPerInterface":{ "shape":"MaxIpv4AddrPerInterface", "documentation":"

The maximum number of IPv4 addresses per network interface.

", @@ -28725,7 +29244,7 @@ }, "PublicIp":{ "shape":"String", - "documentation":"

The address of the Elastic IP address or Carrier IP address bound to the network interface.

", + "documentation":"

The address of the Elastic IP address bound to the network interface.

", "locationName":"publicIp" }, "CustomerOwnedIp":{ @@ -28764,6 +29283,11 @@ "documentation":"

The device index of the network interface attachment on the instance.

", "locationName":"deviceIndex" }, + "NetworkCardIndex":{ + "shape":"Integer", + "documentation":"

The index of the network card.

", + "locationName":"networkCardIndex" + }, "InstanceId":{ "shape":"String", "documentation":"

The ID of the instance.

", @@ -29526,7 +30050,7 @@ "members":{ "SupportedStrategies":{ "shape":"PlacementGroupStrategyList", - "documentation":"

A list of supported placement groups types.

", + "documentation":"

The supported placement group types.

", "locationName":"supportedStrategies" } }, @@ -29882,6 +30406,24 @@ "Role" ] }, + "PrivateDnsDetails":{ + "type":"structure", + "members":{ + "PrivateDnsName":{ + "shape":"String", + "documentation":"

The private DNS name assigned to the VPC endpoint service.

", + "locationName":"privateDnsName" + } + }, + "documentation":"

Information about the Private DNS name for interface endpoints.

" + }, + "PrivateDnsDetailsSet":{ + "type":"list", + "member":{ + "shape":"PrivateDnsDetails", + "locationName":"item" + } + }, "PrivateDnsNameConfiguration":{ "type":"structure", "members":{ @@ -29950,7 +30492,7 @@ "members":{ "SupportedArchitectures":{ "shape":"ArchitectureTypeList", - "documentation":"

A list of architectures supported by the instance type.

", + "documentation":"

The architectures supported by the instance type.

", "locationName":"supportedArchitectures" }, "SustainedClockSpeedInGhz":{ @@ -31008,6 +31550,10 @@ "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.

" + }, "EgressOnlyInternetGatewayId":{ "shape":"EgressOnlyInternetGatewayId", "documentation":"

[IPv6 traffic only] The ID of an egress-only internet gateway.

", @@ -31139,6 +31685,10 @@ } } }, + "ReplacementStrategy":{ + "type":"string", + "enum":["launch"] + }, "ReportInstanceReasonCodes":{ "type":"string", "enum":[ @@ -31223,7 +31773,8 @@ "type":"list", "member":{"shape":"InstanceType"}, "locationName":"InstanceType", - "max":100 + "max":100, + "min":0 }, "RequestLaunchTemplateData":{ "type":"structure", @@ -31339,6 +31890,10 @@ "MetadataOptions":{ "shape":"LaunchTemplateInstanceMetadataOptionsRequest", "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

" + }, + "EnclaveOptions":{ + "shape":"LaunchTemplateEnclaveOptionsRequest", + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.

You can't enable AWS Nitro Enclaves and hibernation on the same instance.

" } }, "documentation":"

The information to include in the launch template.
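A hedged sketch of enabling Nitro Enclaves through the new EnclaveOptions member of the launch template data; the AMI ID and instance type below are placeholders, and the instance type must support enclaves:

```java
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.CreateLaunchTemplateRequest;
import software.amazon.awssdk.services.ec2.model.LaunchTemplateEnclaveOptionsRequest;
import software.amazon.awssdk.services.ec2.model.RequestLaunchTemplateData;

Ec2Client ec2 = Ec2Client.create();

// Instances launched from this template are enabled for AWS Nitro Enclaves.
// Hibernation cannot be enabled on the same template.
ec2.createLaunchTemplate(CreateLaunchTemplateRequest.builder()
        .launchTemplateName("enclave-enabled")
        .launchTemplateData(RequestLaunchTemplateData.builder()
                .imageId("ami-1234567890abcdef0")  // placeholder AMI
                .instanceType("m5.xlarge")         // placeholder; must be enclave-capable
                .enclaveOptions(LaunchTemplateEnclaveOptionsRequest.builder()
                        .enabled(true)
                        .build())
                .build())
        .build());
```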

" @@ -31381,7 +31936,7 @@ }, "BlockDurationMinutes":{ "shape":"Integer", - "documentation":"

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

The duration period starts as soon as your Spot Instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

You can't specify an Availability Zone group or a launch group if you specify a duration.

", + "documentation":"

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

The duration period starts as soon as your Spot Instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

You can't specify an Availability Zone group or a launch group if you specify a duration.

New accounts or accounts with no previous billing history with AWS are not eligible for Spot Instances with a defined duration (also known as Spot blocks).

", "locationName":"blockDurationMinutes" }, "ClientToken":{ @@ -31425,7 +31980,7 @@ }, "ValidUntil":{ "shape":"DateTime", - "documentation":"

The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached. The default end date is 7 days from the current date.

", + "documentation":"

The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ).

  • For a persistent request, the request remains active until the ValidUntil date and time is reached. Otherwise, the request remains active until you cancel it.

  • For a one-time request, the request remains active until all instances launch, the request is canceled, or the ValidUntil date and time is reached. By default, the request is valid for 7 days from the date the request was created.

", "locationName":"validUntil" }, "TagSpecifications":{ @@ -31580,7 +32135,7 @@ "locationName":"reservationId" } }, - "documentation":"

Describes a reservation.

" + "documentation":"

Describes a launch request for one or more instances, and includes owner, requester, and security group information that applies to all instances in the launch request.

" }, "ReservationId":{"type":"string"}, "ReservationList":{ @@ -32462,6 +33017,11 @@ "shape":"LaunchTemplateInstanceMetadataOptions", "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"metadataOptions" + }, + "EnclaveOptions":{ + "shape":"LaunchTemplateEnclaveOptions", + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves.

", + "locationName":"enclaveOptions" } }, "documentation":"

The information for a launch template.

" @@ -33123,7 +33683,7 @@ }, "HibernationOptions":{ "shape":"HibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

You can't enable hibernation and AWS Nitro Enclaves on the same instance.

" }, "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", @@ -33133,6 +33693,10 @@ "MetadataOptions":{ "shape":"InstanceMetadataOptionsRequest", "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data.

" + }, + "EnclaveOptions":{ + "shape":"EnclaveOptionsRequest", + "documentation":"

Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see What is AWS Nitro Enclaves? in the AWS Nitro Enclaves User Guide.

You can't enable AWS Nitro Enclaves and hibernation on the same instance.

" } } }, @@ -33945,6 +34509,13 @@ "locationName":"SecurityGroup" } }, + "SelfServicePortal":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, "SendDiagnosticInterruptRequest":{ "type":"structure", "required":["InstanceId"], @@ -34006,6 +34577,11 @@ "documentation":"

The Amazon Resource Names (ARNs) of the Network Load Balancers for the service.

", "locationName":"networkLoadBalancerArnSet" }, + "GatewayLoadBalancerArns":{ + "shape":"ValueStringList", + "documentation":"

The Amazon Resource Names (ARNs) of the Gateway Load Balancers for the service.

", + "locationName":"gatewayLoadBalancerArnSet" + }, "BaseEndpointDnsNames":{ "shape":"ValueStringList", "documentation":"

The DNS names for the service.

", @@ -34074,6 +34650,11 @@ "documentation":"

The private DNS name for the service.

", "locationName":"privateDnsName" }, + "PrivateDnsNames":{ + "shape":"PrivateDnsDetailsSet", + "documentation":"

The private DNS names assigned to the VPC endpoint service.

", + "locationName":"privateDnsNameSet" + }, "VpcEndpointPolicySupported":{ "shape":"Boolean", "documentation":"

Indicates whether the service supports endpoint policies.

", @@ -34123,7 +34704,8 @@ "type":"string", "enum":[ "Interface", - "Gateway" + "Gateway", + "GatewayLoadBalancer" ] }, "ServiceTypeDetail":{ @@ -34248,7 +34830,7 @@ }, "OwnerAlias":{ "shape":"String", - "documentation":"

The AWS owner alias, as maintained by Amazon. The possible values are: amazon | self | all | aws-marketplace | microsoft. This AWS owner alias is not to be confused with the user-configured AWS account alias, which is set from the IAM console.

", + "documentation":"

The AWS owner alias, from an Amazon-maintained list (amazon). This is not the user-configured AWS account alias set using the IAM console.

", "locationName":"ownerAlias" }, "Tags":{ @@ -34506,12 +35088,23 @@ "capacity-optimized" ] }, + "SpotCapacityRebalance":{ + "type":"structure", + "members":{ + "ReplacementStrategy":{ + "shape":"ReplacementStrategy", + "documentation":"

The replacement strategy to use. Only available for fleets of type maintain. You must specify a value, otherwise you get an error.

To allow Spot Fleet to launch a replacement Spot Instance when an instance rebalance notification is emitted for a Spot Instance in the fleet, specify launch.

When a replacement instance is launched, the instance marked for rebalance is not automatically terminated. You can terminate it, or you can leave it running. You are charged for all instances while they are running.

", + "locationName":"replacementStrategy" + } + }, + "documentation":"

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.
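A minimal sketch of wiring up capacity rebalancing with the generated classes shown in this hunk; the resulting object is then attached to the Spot Fleet request configuration before calling requestSpotFleet:

```java
import software.amazon.awssdk.services.ec2.model.ReplacementStrategy;
import software.amazon.awssdk.services.ec2.model.SpotCapacityRebalance;
import software.amazon.awssdk.services.ec2.model.SpotMaintenanceStrategies;

// Launch a replacement Spot Instance when EC2 signals an elevated interruption
// risk; only supported for fleets of type "maintain". The instance marked for
// rebalance is not terminated automatically.
SpotMaintenanceStrategies strategies = SpotMaintenanceStrategies.builder()
        .capacityRebalance(SpotCapacityRebalance.builder()
                .replacementStrategy(ReplacementStrategy.LAUNCH)
                .build())
        .build();
// Attach via SpotFleetRequestConfigData.builder().spotMaintenanceStrategies(strategies)...
```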

" + }, "SpotDatafeedSubscription":{ "type":"structure", "members":{ "Bucket":{ "shape":"String", - "documentation":"

The Amazon S3 bucket where the Spot Instance data feed is located.

", + "documentation":"

The name of the Amazon S3 bucket where the Spot Instance data feed is located.

", "locationName":"bucket" }, "Fault":{ @@ -34526,7 +35119,7 @@ }, "Prefix":{ "shape":"String", - "documentation":"

The prefix that is prepended to data feed files.

", + "documentation":"

The prefix for the data feed files.

", "locationName":"prefix" }, "State":{ @@ -34592,7 +35185,7 @@ }, "NetworkInterfaces":{ "shape":"InstanceNetworkInterfaceSpecificationList", - "documentation":"

One or more network interfaces. If you specify a network interface, you must specify subnet IDs and security group IDs using the network interface.

", + "documentation":"

One or more network interfaces. If you specify a network interface, you must specify subnet IDs and security group IDs using the network interface.

SpotFleetLaunchSpecification currently does not support Elastic Fabric Adapter (EFA). To specify an EFA, you must use LaunchTemplateConfig.

", "locationName":"networkInterfaceSet" }, "Placement":{ @@ -34631,7 +35224,7 @@ "locationName":"tagSpecificationSet" } }, - "documentation":"

Describes the launch specification for one or more Spot Instances. If you include On-Demand capacity in your fleet request, you can't use SpotFleetLaunchSpecification; you must use LaunchTemplateConfig.

" + "documentation":"

Describes the launch specification for one or more Spot Instances. If you include On-Demand capacity in your fleet request or want to specify an EFA network device, you can't use SpotFleetLaunchSpecification; you must use LaunchTemplateConfig.

" }, "SpotFleetMonitoring":{ "type":"structure", @@ -34697,6 +35290,11 @@ "documentation":"

The order of the launch template overrides to use in fulfilling On-Demand capacity. If you specify lowestPrice, Spot Fleet uses price to determine the order, launching the lowest price first. If you specify prioritized, Spot Fleet uses the priority that you assign to each Spot Fleet launch template override, launching the highest priority first. If you do not specify a value, Spot Fleet defaults to lowestPrice.

", "locationName":"onDemandAllocationStrategy" }, + "SpotMaintenanceStrategies":{ + "shape":"SpotMaintenanceStrategies", + "documentation":"

The strategies for managing your Spot Instances that are at an elevated risk of being interrupted.

", + "locationName":"spotMaintenanceStrategies" + }, "ClientToken":{ "shape":"String", "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of your listings. This helps to avoid duplicate listings. For more information, see Ensuring Idempotency.

", @@ -34941,7 +35539,7 @@ }, "ValidUntil":{ "shape":"DateTime", - "documentation":"

The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). If this is a one-time request, it remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached. The default end date is 7 days from the current date.

", + "documentation":"

The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ).

  • For a persistent request, the request remains active until the validUntil date and time is reached. Otherwise, the request remains active until you cancel it.

  • For a one-time request, the request remains active until all instances launch, the request is canceled, or the validUntil date and time is reached. By default, the request is valid for 7 days from the date the request was created.

", "locationName":"validUntil" }, "InstanceInterruptionBehavior":{ @@ -35021,6 +35619,17 @@ "persistent" ] }, + "SpotMaintenanceStrategies":{ + "type":"structure", + "members":{ + "CapacityRebalance":{ + "shape":"SpotCapacityRebalance", + "documentation":"

The strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted.

", + "locationName":"capacityRebalance" + } + }, + "documentation":"

The strategies for managing your Spot Instances that are at an elevated risk of being interrupted.

" + }, "SpotMarketOptions":{ "type":"structure", "members":{ @@ -35034,11 +35643,11 @@ }, "BlockDurationMinutes":{ "shape":"Integer", - "documentation":"

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

" + "documentation":"

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

The duration period starts as soon as your Spot Instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

You can't specify an Availability Zone group or a launch group if you specify a duration.

New accounts or accounts with no previous billing history with AWS are not eligible for Spot Instances with a defined duration (also known as Spot blocks).

" }, "ValidUntil":{ "shape":"DateTime", - "documentation":"

The end date of the request. For a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date and time is reached. The default end date is 7 days from the current date.

" + "documentation":"

The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported only for persistent requests.

  • For a persistent request, the request remains active until the ValidUntil date and time is reached. Otherwise, the request remains active until you cancel it.

  • For a one-time request, ValidUntil is not supported. The request remains active until all instances launch or you cancel the request.

" }, "InstanceInterruptionBehavior":{ "shape":"InstanceInterruptionBehavior", @@ -35055,6 +35664,11 @@ "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

", "locationName":"allocationStrategy" }, + "MaintenanceStrategies":{ + "shape":"FleetSpotMaintenanceStrategies", + "documentation":"

The strategies for managing your workloads on your Spot Instances that will be interrupted. Currently only the capacity rebalance strategy is available.

", + "locationName":"maintenanceStrategies" + }, "InstanceInterruptionBehavior":{ "shape":"SpotInstanceInterruptionBehavior", "documentation":"

The behavior when a Spot Instance is interrupted. The default is terminate.

", @@ -35095,6 +35709,10 @@ "shape":"SpotAllocationStrategy", "documentation":"

Indicates how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet.

If the allocation strategy is lowest-price, EC2 Fleet launches instances from the Spot Instance pools with the lowest price. This is the default allocation strategy.

If the allocation strategy is diversified, EC2 Fleet launches instances from all of the Spot Instance pools that you specify.

If the allocation strategy is capacity-optimized, EC2 Fleet launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

" }, + "MaintenanceStrategies":{ + "shape":"FleetSpotMaintenanceStrategiesRequest", + "documentation":"

The strategies for managing your Spot Instances that are at an elevated risk of being interrupted.

" + }, "InstanceInterruptionBehavior":{ "shape":"SpotInstanceInterruptionBehavior", "documentation":"

The behavior when a Spot Instance is interrupted. The default is terminate.

" @@ -35731,7 +36349,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | client-vpn-endpoint | customer-gateway | dedicated-host | dhcp-options | export-image-task | export-instance-task | fleet | fpga-image | host-reservation | import-image-task | import-snapshot-task | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | launch-template | placement-group | prefix-list | natgateway | network-acl | route-table | security-group | spot-fleet-request | spot-instances-request | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-route-table | volume |vpc | vpc-peering-connection | vpc-endpoint (for interface and gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log | vpn-connection | vpn-gateway.

To tag a resource after it has been created, see CreateTags.

", + "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | carrier-gateway | client-vpn-endpoint | customer-gateway | dedicated-host | dhcp-options | export-image-task | export-instance-task | fleet | fpga-image | host-reservation | import-image-task | import-snapshot-task | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | launch-template | placement-group | prefix-list | natgateway | network-acl | route-table | security-group | spot-fleet-request | spot-instances-request | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-route-table | volume |vpc | vpc-peering-connection | vpc-endpoint (for interface and gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log | vpn-connection | vpn-gateway.

To tag a resource after it has been created, see CreateTags.

", "locationName":"resourceType" }, "Tags":{ @@ -37471,6 +38089,11 @@ "shape":"Ipv6SupportValue", "documentation":"

Indicates whether IPv6 support is disabled.

", "locationName":"ipv6Support" + }, + "ApplianceModeSupport":{ + "shape":"ApplianceModeSupportValue", + "documentation":"

Indicates whether appliance mode support is enabled.

", + "locationName":"applianceModeSupport" } }, "documentation":"

Describes the VPC attachment options.

" @@ -37990,12 +38613,12 @@ }, "ValidCores":{ "shape":"CoreCountList", - "documentation":"

List of the valid number of cores that can be configured for the instance type.

", + "documentation":"

The valid number of cores that can be configured for the instance type.

", "locationName":"validCores" }, "ValidThreadsPerCore":{ "shape":"ThreadsPerCoreList", - "documentation":"

List of the valid number of threads per core that can be configured for the instance type.

", + "documentation":"

The valid number of threads per core that can be configured for the instance type.

", "locationName":"validThreadsPerCore" } }, @@ -38037,7 +38660,8 @@ }, "VersionDescription":{ "type":"string", - "max":255 + "max":255, + "min":0 }, "VersionStringList":{ "type":"list", @@ -38158,7 +38782,7 @@ }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For Provisioned IOPS SSD volumes, this represents the number of IOPS that are provisioned for the volume. For General Purpose SSD volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS for io1 and io2 volumes, in most Regions. The maximum IOPS for io1 and io2 of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS.

Condition: This parameter is required for requests to create io1 and io2 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

", "locationName":"iops" }, "Tags":{ @@ -38168,7 +38792,7 @@ }, "VolumeType":{ "shape":"VolumeType", - "documentation":"

The volume type. This can be gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

", + "documentation":"

The volume type.

", "locationName":"volumeType" }, "FastRestored":{ @@ -38180,6 +38804,11 @@ "shape":"Boolean", "documentation":"

Indicates whether Amazon EBS Multi-Attach is enabled.

", "locationName":"multiAttachEnabled" + }, + "Throughput":{ + "shape":"Integer", + "documentation":"

The throughput that the volume supports, in MiB/s.

", + "locationName":"throughput" } }, "documentation":"

Describes a volume.

" @@ -38304,6 +38933,11 @@ "documentation":"

The target EBS volume type of the volume.

", "locationName":"targetVolumeType" }, + "TargetThroughput":{ + "shape":"Integer", + "documentation":"

The target throughput of the volume, in MiB/s.

", + "locationName":"targetThroughput" + }, "OriginalSize":{ "shape":"Integer", "documentation":"

The original size of the volume, in GiB.

", @@ -38319,6 +38953,11 @@ "documentation":"

The original EBS volume type of the volume.

", "locationName":"originalVolumeType" }, + "OriginalThroughput":{ + "shape":"Integer", + "documentation":"

The original throughput of the volume, in MiB/s.

", + "locationName":"originalThroughput" + }, "Progress":{ "shape":"Long", "documentation":"

The modification progress, from 0 to 100 percent complete.

", @@ -38573,7 +39212,8 @@ "io2", "gp2", "sc1", - "st1" + "st1", + "gp3" ] }, "Vpc":{ @@ -38881,6 +39521,11 @@ "shape":"ValueStringList", "documentation":"

The Amazon Resource Names (ARNs) of the network load balancers for the service.

", "locationName":"networkLoadBalancerArnSet" + }, + "GatewayLoadBalancerArns":{ + "shape":"ValueStringList", + "documentation":"

The Amazon Resource Names (ARNs) of the Gateway Load Balancers for the service.

", + "locationName":"gatewayLoadBalancerArnSet" } }, "documentation":"

Describes a VPC endpoint connection to a service.

" @@ -38940,7 +39585,8 @@ "type":"string", "enum":[ "Interface", - "Gateway" + "Gateway", + "GatewayLoadBalancer" ] }, "VpcFlowLogId":{"type":"string"}, diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index 9f33c74888fa..2f31c2576240 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index a0564d1674a8..7679178b44b5 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml new file mode 100644 index 000000000000..97a2ef7304a9 --- /dev/null +++ b/services/ecrpublic/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + ecrpublic + AWS Java SDK :: Services :: ECR PUBLIC + The AWS Java SDK for ECR PUBLIC module holds the client classes that are used for + communicating with ECR PUBLIC. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.ecrpublic + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/ecrpublic/src/main/resources/codegen-resources/paginators-1.json b/services/ecrpublic/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..bcdd4c61078a --- /dev/null +++ b/services/ecrpublic/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "DescribeImageTags": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "imageTagDetails" + }, + "DescribeImages": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "imageDetails" + }, + "DescribeRegistries": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "registries" + }, + "DescribeRepositories": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "repositories" + } + } +} \ No newline at end of file diff --git a/services/ecrpublic/src/main/resources/codegen-resources/service-2.json b/services/ecrpublic/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..6901381fcc76 --- /dev/null +++ b/services/ecrpublic/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1744 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-10-30", + "endpointPrefix":"api.ecr-public", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"Amazon ECR Public", + "serviceFullName":"Amazon Elastic Container Registry Public", + "serviceId":"ECR PUBLIC", + "signatureVersion":"v4", + "signingName":"ecr-public", + "targetPrefix":"SpencerFrontendService", + "uid":"ecr-public-2020-10-30" + }, + "operations":{ + "BatchCheckLayerAvailability":{ + "name":"BatchCheckLayerAvailability", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchCheckLayerAvailabilityRequest"}, + "output":{"shape":"BatchCheckLayerAvailabilityResponse"}, + 
"errors":[ + {"shape":"RepositoryNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServerException"}, + {"shape":"RegistryNotFoundException"} + ], + "documentation":"

Checks the availability of one or more image layers within a repository in a public registry. When an image is pushed to a repository, each image layer is checked to verify if it has been uploaded before. If it has been uploaded, then the image layer is skipped.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + }, + "BatchDeleteImage":{ + "name":"BatchDeleteImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteImageRequest"}, + "output":{"shape":"BatchDeleteImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

Deletes a list of specified images within a repository in a public registry. Images are specified with either an imageTag or imageDigest.

You can remove a tag from an image by specifying the image's tag in your request. When you remove the last tag from an image, the image is deleted from your repository.

You can completely delete an image (and all of its tags) by specifying the image's digest in your request.

" + }, + "CompleteLayerUpload":{ + "name":"CompleteLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CompleteLayerUploadRequest"}, + "output":{"shape":"CompleteLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"InvalidLayerException"}, + {"shape":"LayerPartTooSmallException"}, + {"shape":"LayerAlreadyExistsException"}, + {"shape":"EmptyUploadException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

Informs Amazon ECR that the image layer upload has completed for a specified public registry, repository name, and upload ID. You can optionally provide a sha256 digest of the image layer for data validation purposes.

When an image is pushed, the CompleteLayerUpload API is called once per each new image layer to verify that the upload has completed.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryRequest"}, + "output":{"shape":"CreateRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryAlreadyExistsException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a repository in a public registry. For more information, see Amazon ECR repositories in the Amazon Elastic Container Registry User Guide.
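A minimal sketch using the new ecrpublic client added in this release; the repositoryName member is an assumption based on the naming used throughout this model, since the request shape is not shown in this hunk:

```java
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.CreateRepositoryRequest;

EcrPublicClient ecrPublic = EcrPublicClient.create();

// Create a repository in the default public registry.
ecrPublic.createRepository(CreateRepositoryRequest.builder()
        .repositoryName("my-public-image")   // assumed member name
        .build());
```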

" + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryRequest"}, + "output":{"shape":"DeleteRepositoryResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryNotEmptyException"} + ], + "documentation":"

Deletes a repository in a public registry. If the repository contains images, you must either delete all images in the repository or use the force option which deletes all images on your behalf before deleting the repository.

" + }, + "DeleteRepositoryPolicy":{ + "name":"DeleteRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryPolicyRequest"}, + "output":{"shape":"DeleteRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ], + "documentation":"

Deletes the repository policy associated with the specified repository.

" + }, + "DescribeImageTags":{ + "name":"DescribeImageTags", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageTagsRequest"}, + "output":{"shape":"DescribeImageTagsResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

Returns the image tag details for a repository in a public registry.

" + }, + "DescribeImages":{ + "name":"DescribeImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImagesRequest"}, + "output":{"shape":"DescribeImagesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ImageNotFoundException"} + ], + "documentation":"

Returns metadata about the images in a repository in a public registry.

Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

" + }, + "DescribeRegistries":{ + "name":"DescribeRegistries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRegistriesRequest"}, + "output":{"shape":"DescribeRegistriesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedCommandException"}, + {"shape":"ServerException"} + ], + "documentation":"

Returns details for a public registry.

" + }, + "DescribeRepositories":{ + "name":"DescribeRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRepositoriesRequest"}, + "output":{"shape":"DescribeRepositoriesResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

Describes repositories in a public registry.

" + }, + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Retrieves an authorization token. An authorization token represents your IAM authentication credentials and can be used to access any Amazon ECR registry that your IAM principal has access to. The authorization token is valid for 12 hours. This API requires the ecr-public:GetAuthorizationToken and sts:GetServiceBearerToken permissions.
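A hedged sketch of fetching and decoding the token with the new ecrpublic module; the client class name and the authorizationData accessor are assumptions based on the generated code conventions, while authorizationToken comes from the AuthorizationData shape defined later in this file:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
import software.amazon.awssdk.services.ecrpublic.model.GetAuthorizationTokenRequest;

EcrPublicClient ecrPublic = EcrPublicClient.create();

String token = ecrPublic.getAuthorizationToken(GetAuthorizationTokenRequest.builder().build())
        .authorizationData()          // assumed accessor
        .authorizationToken();

// The decoded form is "user:password", suitable for docker login.
String[] userAndPassword =
        new String(Base64.getDecoder().decode(token), StandardCharsets.UTF_8).split(":", 2);
```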

" + }, + "GetRegistryCatalogData":{ + "name":"GetRegistryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRegistryCatalogDataRequest"}, + "output":{"shape":"GetRegistryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

Retrieves catalog metadata for a public registry.

" + }, + "GetRepositoryCatalogData":{ + "name":"GetRepositoryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryCatalogDataRequest"}, + "output":{"shape":"GetRepositoryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

Retrieve catalog metadata for a repository in a public registry. This metadata is displayed publicly in the Amazon ECR Public Gallery.

" + }, + "GetRepositoryPolicy":{ + "name":"GetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryPolicyRequest"}, + "output":{"shape":"GetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RepositoryPolicyNotFoundException"} + ], + "documentation":"

Retrieves the repository policy for the specified repository.

" + }, + "InitiateLayerUpload":{ + "name":"InitiateLayerUpload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InitiateLayerUploadRequest"}, + "output":{"shape":"InitiateLayerUploadResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

Notifies Amazon ECR that you intend to upload an image layer.

When an image is pushed, the InitiateLayerUpload API is called once per image layer that has not already been uploaded. Whether or not an image layer has been uploaded is determined by the BatchCheckLayerAvailability API action.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + }, + "PutImage":{ + "name":"PutImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutImageRequest"}, + "output":{"shape":"PutImageResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"ImageAlreadyExistsException"}, + {"shape":"LayersNotFoundException"}, + {"shape":"ReferencedImagesNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ImageTagAlreadyExistsException"}, + {"shape":"ImageDigestDoesNotMatchException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

Creates or updates the image manifest and tags associated with an image.

When an image is pushed and all new image layers have been uploaded, the PutImage API is called once to create or update the image manifest and the tags associated with the image.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + }, + "PutRegistryCatalogData":{ + "name":"PutRegistryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRegistryCatalogDataRequest"}, + "output":{"shape":"PutRegistryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

Creates or updates the catalog data for a public registry.

" + }, + "PutRepositoryCatalogData":{ + "name":"PutRepositoryCatalogData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRepositoryCatalogDataRequest"}, + "output":{"shape":"PutRepositoryCatalogDataResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

Creates or updates the catalog data for a repository in a public registry.

" + }, + "SetRepositoryPolicy":{ + "name":"SetRepositoryPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetRepositoryPolicyRequest"}, + "output":{"shape":"SetRepositoryPolicyResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"RepositoryNotFoundException"} + ], + "documentation":"

Applies a repository policy to the specified public repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" + }, + "UploadLayerPart":{ + "name":"UploadLayerPart", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UploadLayerPartRequest"}, + "output":{"shape":"UploadLayerPartResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidLayerPartException"}, + {"shape":"RepositoryNotFoundException"}, + {"shape":"UploadNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"RegistryNotFoundException"}, + {"shape":"UnsupportedCommandException"} + ], + "documentation":"

Uploads an image layer part to Amazon ECR.

When an image is pushed, each new image layer is uploaded in parts. The maximum size of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per each new image layer part.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + } + }, + "shapes":{ + "AboutText":{ + "type":"string", + "max":10240 + }, + "Architecture":{ + "type":"string", + "max":50, + "min":1 + }, + "ArchitectureList":{ + "type":"list", + "member":{"shape":"Architecture"}, + "max":50 + }, + "Arn":{"type":"string"}, + "AuthorizationData":{ + "type":"structure", + "members":{ + "authorizationToken":{ + "shape":"Base64", + "documentation":"

A base64-encoded string that contains authorization data for a public Amazon ECR registry. When the string is decoded, it is presented in the format user:password for public registry authentication using docker login.

" + }, + "expiresAt":{ + "shape":"ExpirationTimestamp", + "documentation":"

The Unix time in seconds and milliseconds when the authorization token expires. Authorization tokens are valid for 12 hours.

" + } + }, + "documentation":"

An authorization token data object that corresponds to a public registry.
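A minimal sketch of consuming this object follows, assuming the generated EcrPublicClient and its no-argument GetAuthorizationToken convenience method; how the decoded credentials are handed to docker login is outside the API and shown only as a comment.

    // Sketch: fetching a public registry authorization token and decoding it to user:password.
    import java.util.Base64;
    import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
    import software.amazon.awssdk.services.ecrpublic.model.AuthorizationData;

    public class AuthTokenExample {
        public static void main(String[] args) {
            try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
                AuthorizationData auth = ecrPublic.getAuthorizationToken().authorizationData();
                // The token decodes to "user:password", which docker login expects.
                String decoded = new String(Base64.getDecoder().decode(auth.authorizationToken()));
                String user = decoded.substring(0, decoded.indexOf(':'));
                String password = decoded.substring(decoded.indexOf(':') + 1);
                System.out.println("Token for user " + user + " expires at " + auth.expiresAt());
                // e.g. pipe the password to: docker login --username <user> --password-stdin public.ecr.aws
            }
        }
    }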

" + }, + "Base64":{ + "type":"string", + "pattern":"^\\S+$" + }, + "BatchCheckLayerAvailabilityRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "layerDigests" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

The AWS account ID associated with the public registry that contains the image layers to check. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that is associated with the image layers to check.

" + }, + "layerDigests":{ + "shape":"BatchedOperationLayerDigestList", + "documentation":"

The digests of the image layers to check.

" + } + } + }, + "BatchCheckLayerAvailabilityResponse":{ + "type":"structure", + "members":{ + "layers":{ + "shape":"LayerList", + "documentation":"

A list of image layer objects corresponding to the image layer references in the request.

" + }, + "failures":{ + "shape":"LayerFailureList", + "documentation":"

Any failures associated with the call.

" + } + } + }, + "BatchDeleteImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageIds" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the registry that contains the image to delete. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository in a public registry that contains the image to delete.

" + }, + "imageIds":{ + "shape":"ImageIdentifierList", + "documentation":"

A list of image ID references that correspond to images to delete. The format of the imageIds reference is imageTag=tag or imageDigest=digest.
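As a non-authoritative sketch of the imageTag/imageDigest reference format described above, a BatchDeleteImage call might look like the following; the repository name, tag, and digest are placeholders.

    // Sketch: deleting one image by tag and one by digest with BatchDeleteImage.
    import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
    import software.amazon.awssdk.services.ecrpublic.model.BatchDeleteImageResponse;
    import software.amazon.awssdk.services.ecrpublic.model.ImageIdentifier;

    public class BatchDeleteImageExample {
        public static void main(String[] args) {
            try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
                BatchDeleteImageResponse response = ecrPublic.batchDeleteImage(r -> r
                        .repositoryName("project-a/nginx-web-app")
                        .imageIds(
                                ImageIdentifier.builder().imageTag("v1.0").build(),
                                ImageIdentifier.builder()
                                        .imageDigest("sha256:0123456789abcdef") // placeholder digest
                                        .build()));
                // Report any per-image failures returned by the call.
                response.failures().forEach(f ->
                        System.out.println(f.failureCode() + ": " + f.failureReason()));
            }
        }
    }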

" + } + } + }, + "BatchDeleteImageResponse":{ + "type":"structure", + "members":{ + "imageIds":{ + "shape":"ImageIdentifierList", + "documentation":"

The image IDs of the deleted images.

" + }, + "failures":{ + "shape":"ImageFailureList", + "documentation":"

Any failures associated with the call.

" + } + } + }, + "BatchedOperationLayerDigest":{ + "type":"string", + "max":1000, + "min":0 + }, + "BatchedOperationLayerDigestList":{ + "type":"list", + "member":{"shape":"BatchedOperationLayerDigest"}, + "max":100, + "min":1 + }, + "CompleteLayerUploadRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "uploadId", + "layerDigests" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

The AWS account ID associated with the registry to which to upload layers. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository in a public registry to associate with the image layer.

" + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

The upload ID from a previous InitiateLayerUpload operation to associate with the image layer.

" + }, + "layerDigests":{ + "shape":"LayerDigestList", + "documentation":"

The sha256 digest of the image layer.

" + } + } + }, + "CompleteLayerUploadResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The public registry ID associated with the request.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name associated with the request.

" + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

The upload ID associated with the layer.

" + }, + "layerDigest":{ + "shape":"LayerDigest", + "documentation":"

The sha256 digest of the image layer.

" + } + } + }, + "CreateRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name to use for the repository. This appears publicly in the Amazon ECR Public Gallery. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app).

" + }, + "catalogData":{ + "shape":"RepositoryCatalogDataInput", + "documentation":"

The details about the repository that are publicly visible in the Amazon ECR Public Gallery.
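A minimal sketch of creating a repository with catalog data follows, assuming the generated EcrPublicClient; the repository name, description, and markdown text are placeholders.

    // Sketch: creating a public repository together with its Gallery catalog data.
    import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
    import software.amazon.awssdk.services.ecrpublic.model.CreateRepositoryResponse;
    import software.amazon.awssdk.services.ecrpublic.model.RepositoryCatalogDataInput;

    public class CreateRepositoryExample {
        public static void main(String[] args) {
            try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
                RepositoryCatalogDataInput catalogData = RepositoryCatalogDataInput.builder()
                        .description("Example web application image")                        // shown in search results
                        .aboutText("## About\nLonger markdown description.")                 // shown on the repository page
                        .usageText("## Usage\ndocker pull public.ecr.aws/<alias>/nginx-web-app")
                        .build();
                CreateRepositoryResponse response = ecrPublic.createRepository(r -> r
                        .repositoryName("project-a/nginx-web-app")   // namespaced repository name
                        .catalogData(catalogData));
                System.out.println("Repository URI: " + response.repository().repositoryUri());
            }
        }
    }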

" + } + } + }, + "CreateRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"Repository", + "documentation":"

The repository that was created.

" + }, + "catalogData":{"shape":"RepositoryCatalogData"} + } + }, + "CreationTimestamp":{"type":"timestamp"}, + "DefaultRegistryAliasFlag":{"type":"boolean"}, + "DeleteRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry that contains the repository policy to delete. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that is associated with the repository policy to delete.

" + } + } + }, + "DeleteRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name associated with the request.

" + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

The JSON repository policy that was deleted from the repository.

" + } + } + }, + "DeleteRepositoryRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry that contains the repository to delete. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to delete.

" + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

If a repository contains images, forces the deletion.

" + } + } + }, + "DeleteRepositoryResponse":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"Repository", + "documentation":"

The repository that was deleted.

" + } + } + }, + "DescribeImageTagsRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry that contains the repository in which to describe images. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the image tag details to describe.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value returned from a previous paginated DescribeImageTags request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of image tag results returned by DescribeImageTags in paginated output. When this parameter is used, DescribeImageTags only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeImageTags request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeImageTags returns up to 100 results and a nextToken value, if applicable.

" + } + } + }, + "DescribeImageTagsResponse":{ + "type":"structure", + "members":{ + "imageTagDetails":{ + "shape":"ImageTagDetailList", + "documentation":"

The image tag details for the images in the requested repository.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value to include in a future DescribeImageTags request. When the results of a DescribeImageTags request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "DescribeImagesRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry that contains the repository in which to describe images. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository that contains the images to describe.

" + }, + "imageIds":{ + "shape":"ImageIdentifierList", + "documentation":"

The list of image IDs for the requested repository.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value returned from a previous paginated DescribeImages request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return. This option cannot be used when you specify images with imageIds.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of image results returned by DescribeImages in paginated output. When this parameter is used, DescribeImages only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeImages request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeImages returns up to 100 results and a nextToken value, if applicable. This option cannot be used when you specify images with imageIds.
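To illustrate the nextToken/maxResults contract described above, a manual pagination loop might look like the following sketch; the repository name is a placeholder and a generated paginator could be used instead.

    // Sketch: walking all DescribeImages pages by following nextToken.
    import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
    import software.amazon.awssdk.services.ecrpublic.model.DescribeImagesResponse;

    public class DescribeImagesPagination {
        public static void main(String[] args) {
            try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
                String nextToken = null;
                do {
                    String token = nextToken;   // effectively final copy for the lambda
                    DescribeImagesResponse page = ecrPublic.describeImages(r -> r
                            .repositoryName("project-a/nginx-web-app")
                            .maxResults(100)    // between 1 and 1000
                            .nextToken(token)); // null on the first call
                    page.imageDetails().forEach(d ->
                            System.out.println(d.imageDigest() + " " + d.imageTags()));
                    nextToken = page.nextToken();   // null when there are no more results
                } while (nextToken != null);
            }
        }
    }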

" + } + } + }, + "DescribeImagesResponse":{ + "type":"structure", + "members":{ + "imageDetails":{ + "shape":"ImageDetailList", + "documentation":"

A list of ImageDetail objects that contain data about the image.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value to include in a future DescribeImages request. When the results of a DescribeImages request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "DescribeRegistriesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value returned from a previous paginated DescribeRegistries request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of registry results returned by DescribeRegistries in paginated output. When this parameter is used, DescribeRegistries only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRegistries request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRegistries returns up to 100 results and a nextToken value, if applicable.

" + } + } + }, + "DescribeRegistriesResponse":{ + "type":"structure", + "required":["registries"], + "members":{ + "registries":{ + "shape":"RegistryList", + "documentation":"

A list of registry objects containing the details for each public registry.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value to include in a future DescribeRegistries request. When the results of a DescribeRegistries request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "DescribeRepositoriesRequest":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the registry that contains the repositories to be described. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryNames":{ + "shape":"RepositoryNameList", + "documentation":"

A list of repositories to describe. If this parameter is omitted, then all repositories in a registry are described.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value returned from a previous paginated DescribeRepositories request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return. This option cannot be used when you specify repositories with repositoryNames.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of repository results returned by DescribeRepositories in paginated output. When this parameter is used, DescribeRepositories only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeRepositories request with the returned nextToken value. This value can be between 1 and 1000. If this parameter is not used, then DescribeRepositories returns up to 100 results and a nextToken value, if applicable. This option cannot be used when you specify repositories with repositoryNames.

" + } + } + }, + "DescribeRepositoriesResponse":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositoryList", + "documentation":"

A list of repository objects corresponding to valid repositories.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The nextToken value to include in a future DescribeRepositories request. When the results of a DescribeRepositories request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, + "EmptyUploadException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified layer upload does not contain any layer parts.

", + "exception":true + }, + "ExceptionMessage":{"type":"string"}, + "ExpirationTimestamp":{"type":"timestamp"}, + "ForceFlag":{"type":"boolean"}, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAuthorizationTokenResponse":{ + "type":"structure", + "members":{ + "authorizationData":{ + "shape":"AuthorizationData", + "documentation":"

An authorization token data object that corresponds to a public registry.

" + } + } + }, + "GetRegistryCatalogDataRequest":{ + "type":"structure", + "members":{ + } + }, + "GetRegistryCatalogDataResponse":{ + "type":"structure", + "required":["registryCatalogData"], + "members":{ + "registryCatalogData":{ + "shape":"RegistryCatalogData", + "documentation":"

The catalog metadata for the public registry.

" + } + } + }, + "GetRepositoryCatalogDataRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the registry that contains the repositories to be described. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to retrieve the catalog metadata for.

" + } + } + }, + "GetRepositoryCatalogDataResponse":{ + "type":"structure", + "members":{ + "catalogData":{ + "shape":"RepositoryCatalogData", + "documentation":"

The catalog metadata for the repository.

" + } + } + }, + "GetRepositoryPolicyRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry that contains the repository. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository with the policy to retrieve.

" + } + } + }, + "GetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name associated with the request.

" + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

The repository policy text associated with the repository. The policy text will be in JSON format.

" + } + } + }, + "Image":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

The AWS account ID associated with the registry containing the image.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository associated with the image.

" + }, + "imageId":{ + "shape":"ImageIdentifier", + "documentation":"

An object containing the image tag and image digest associated with an image.

" + }, + "imageManifest":{ + "shape":"ImageManifest", + "documentation":"

The image manifest associated with the image.

" + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

The manifest media type of the image.

" + } + }, + "documentation":"

An object representing an Amazon ECR image.

" + }, + "ImageAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified image has already been pushed, and there were no changes to the manifest or image tag after the last push.

", + "exception":true + }, + "ImageDetail":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry to which this image belongs.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to which this image belongs.

" + }, + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

The sha256 digest of the image manifest.

" + }, + "imageTags":{ + "shape":"ImageTagList", + "documentation":"

The list of tags associated with this image.

" + }, + "imageSizeInBytes":{ + "shape":"ImageSizeInBytes", + "documentation":"

The size, in bytes, of the image in the repository.

If the image is a manifest list, this will be the max size of all manifests in the list.

Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

" + }, + "imagePushedAt":{ + "shape":"PushTimestamp", + "documentation":"

The date and time, expressed in standard JavaScript date format, at which the current image was pushed to the repository.

" + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

The media type of the image manifest.

" + }, + "artifactMediaType":{ + "shape":"MediaType", + "documentation":"

The artifact media type of the image.

" + } + }, + "documentation":"

An object that describes an image returned by a DescribeImages operation.

" + }, + "ImageDetailList":{ + "type":"list", + "member":{"shape":"ImageDetail"} + }, + "ImageDigest":{"type":"string"}, + "ImageDigestDoesNotMatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified image digest does not match the digest that Amazon ECR calculated for the image.

", + "exception":true + }, + "ImageFailure":{ + "type":"structure", + "members":{ + "imageId":{ + "shape":"ImageIdentifier", + "documentation":"

The image ID associated with the failure.

" + }, + "failureCode":{ + "shape":"ImageFailureCode", + "documentation":"

The code associated with the failure.

" + }, + "failureReason":{ + "shape":"ImageFailureReason", + "documentation":"

The reason for the failure.

" + } + }, + "documentation":"

An object representing an Amazon ECR image failure.

" + }, + "ImageFailureCode":{ + "type":"string", + "enum":[ + "InvalidImageDigest", + "InvalidImageTag", + "ImageTagDoesNotMatchDigest", + "ImageNotFound", + "MissingDigestAndTag", + "ImageReferencedByManifestList", + "KmsError" + ] + }, + "ImageFailureList":{ + "type":"list", + "member":{"shape":"ImageFailure"} + }, + "ImageFailureReason":{"type":"string"}, + "ImageIdentifier":{ + "type":"structure", + "members":{ + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

The sha256 digest of the image manifest.

" + }, + "imageTag":{ + "shape":"ImageTag", + "documentation":"

The tag used for the image.

" + } + }, + "documentation":"

An object with identifying information for an Amazon ECR image.

" + }, + "ImageIdentifierList":{ + "type":"list", + "member":{"shape":"ImageIdentifier"}, + "max":100, + "min":1 + }, + "ImageManifest":{ + "type":"string", + "max":4194304, + "min":1 + }, + "ImageNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The image requested does not exist in the specified repository.

", + "exception":true + }, + "ImageSizeInBytes":{"type":"long"}, + "ImageTag":{ + "type":"string", + "max":300, + "min":1 + }, + "ImageTagAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified image is tagged with a tag that already exists. The repository is configured for tag immutability.

", + "exception":true + }, + "ImageTagDetail":{ + "type":"structure", + "members":{ + "imageTag":{ + "shape":"ImageTag", + "documentation":"

The tag associated with the image.

" + }, + "createdAt":{ + "shape":"CreationTimestamp", + "documentation":"

The time stamp indicating when the image tag was created.

" + }, + "imageDetail":{ + "shape":"ReferencedImageDetail", + "documentation":"

An object that describes the details of an image.

" + } + }, + "documentation":"

An object representing the image tag details for an image.

" + }, + "ImageTagDetailList":{ + "type":"list", + "member":{"shape":"ImageTagDetail"} + }, + "ImageTagList":{ + "type":"list", + "member":{"shape":"ImageTag"} + }, + "InitiateLayerUploadRequest":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

The AWS account ID associated with the registry to which you intend to upload layers. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to which you intend to upload layers.

" + } + } + }, + "InitiateLayerUploadResponse":{ + "type":"structure", + "members":{ + "uploadId":{ + "shape":"UploadId", + "documentation":"

The upload ID for the layer upload. This parameter is passed to further UploadLayerPart and CompleteLayerUpload operations.

" + }, + "partSize":{ + "shape":"PartSize", + "documentation":"

The size, in bytes, that Amazon ECR expects future layer part uploads to be.

" + } + } + }, + "InvalidLayerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The layer digest calculation performed by Amazon ECR upon receipt of the image layer does not match the digest specified.

", + "exception":true + }, + "InvalidLayerPartException":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the layer part.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

The upload ID associated with the layer part.

" + }, + "lastValidByteReceived":{ + "shape":"PartSize", + "documentation":"

The position of the last byte of the layer part.

" + }, + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The layer part size is not valid, or the first byte specified is not consecutive to the last byte of a previous layer part upload.

", + "exception":true + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified parameter is invalid. Review the available parameters for the API request.

", + "exception":true + }, + "Layer":{ + "type":"structure", + "members":{ + "layerDigest":{ + "shape":"LayerDigest", + "documentation":"

The sha256 digest of the image layer.

" + }, + "layerAvailability":{ + "shape":"LayerAvailability", + "documentation":"

The availability status of the image layer.

" + }, + "layerSize":{ + "shape":"LayerSizeInBytes", + "documentation":"

The size, in bytes, of the image layer.

" + }, + "mediaType":{ + "shape":"MediaType", + "documentation":"

The media type of the layer, such as application/vnd.docker.image.rootfs.diff.tar.gzip or application/vnd.oci.image.layer.v1.tar+gzip.

" + } + }, + "documentation":"

An object representing an Amazon ECR image layer.

" + }, + "LayerAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The image layer already exists in the associated repository.

", + "exception":true + }, + "LayerAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "UNAVAILABLE" + ] + }, + "LayerDigest":{ + "type":"string", + "pattern":"[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+" + }, + "LayerDigestList":{ + "type":"list", + "member":{"shape":"LayerDigest"}, + "max":100, + "min":1 + }, + "LayerFailure":{ + "type":"structure", + "members":{ + "layerDigest":{ + "shape":"BatchedOperationLayerDigest", + "documentation":"

The layer digest associated with the failure.

" + }, + "failureCode":{ + "shape":"LayerFailureCode", + "documentation":"

The failure code associated with the failure.

" + }, + "failureReason":{ + "shape":"LayerFailureReason", + "documentation":"

The reason for the failure.

" + } + }, + "documentation":"

An object representing an Amazon ECR image layer failure.

" + }, + "LayerFailureCode":{ + "type":"string", + "enum":[ + "InvalidLayerDigest", + "MissingLayerDigest" + ] + }, + "LayerFailureList":{ + "type":"list", + "member":{"shape":"LayerFailure"} + }, + "LayerFailureReason":{"type":"string"}, + "LayerList":{ + "type":"list", + "member":{"shape":"Layer"} + }, + "LayerPartBlob":{ + "type":"blob", + "max":20971520, + "min":0 + }, + "LayerPartTooSmallException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

Layer parts must be at least 5 MiB in size.

", + "exception":true + }, + "LayerSizeInBytes":{"type":"long"}, + "LayersNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified layers could not be found, or the specified layer is not valid for this repository.

", + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Service Quotas in the Amazon Elastic Container Registry User Guide.

", + "exception":true + }, + "LogoImageBlob":{ + "type":"blob", + "max":512000, + "min":0 + }, + "MarketplaceCertified":{"type":"boolean"}, + "MaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "MediaType":{"type":"string"}, + "NextToken":{"type":"string"}, + "OperatingSystem":{ + "type":"string", + "max":50, + "min":1 + }, + "OperatingSystemList":{ + "type":"list", + "member":{"shape":"OperatingSystem"}, + "max":50 + }, + "PartSize":{ + "type":"long", + "min":0 + }, + "PrimaryRegistryAliasFlag":{"type":"boolean"}, + "PushTimestamp":{"type":"timestamp"}, + "PutImageRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "imageManifest" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

The AWS account ID associated with the public registry that contains the repository in which to put the image. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository in which to put the image.

" + }, + "imageManifest":{ + "shape":"ImageManifest", + "documentation":"

The image manifest corresponding to the image to be uploaded.

" + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

The media type of the image manifest. If you push an image manifest that does not contain the mediaType field, you must specify the imageManifestMediaType in the request.

" + }, + "imageTag":{ + "shape":"ImageTag", + "documentation":"

The tag to associate with the image. This parameter is required for images that use the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) formats.

" + }, + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

The image digest of the image manifest corresponding to the image.

" + } + } + }, + "PutImageResponse":{ + "type":"structure", + "members":{ + "image":{ + "shape":"Image", + "documentation":"

Details of the image uploaded.

" + } + } + }, + "PutRegistryCatalogDataRequest":{ + "type":"structure", + "members":{ + "displayName":{ + "shape":"RegistryDisplayName", + "documentation":"

The display name for a public registry. The display name is shown as the repository author in the Amazon ECR Public Gallery.

The registry display name is only publicly visible in the Amazon ECR Public Gallery for verified accounts.

" + } + } + }, + "PutRegistryCatalogDataResponse":{ + "type":"structure", + "required":["registryCatalogData"], + "members":{ + "registryCatalogData":{ + "shape":"RegistryCatalogData", + "documentation":"

The catalog data for the public registry.

" + } + } + }, + "PutRepositoryCatalogDataRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "catalogData" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry the repository is in. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to create or update the catalog data for.

" + }, + "catalogData":{ + "shape":"RepositoryCatalogDataInput", + "documentation":"

An object containing the catalog data for a repository. This data is publicly visible in the Amazon ECR Public Gallery.

" + } + } + }, + "PutRepositoryCatalogDataResponse":{ + "type":"structure", + "members":{ + "catalogData":{ + "shape":"RepositoryCatalogData", + "documentation":"

The catalog data for the repository.

" + } + } + }, + "ReferencedImageDetail":{ + "type":"structure", + "members":{ + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

The sha256 digest of the image manifest.

" + }, + "imageSizeInBytes":{ + "shape":"ImageSizeInBytes", + "documentation":"

The size, in bytes, of the image in the repository.

If the image is a manifest list, this will be the max size of all manifests in the list.

Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

" + }, + "imagePushedAt":{ + "shape":"PushTimestamp", + "documentation":"

The date and time, expressed in standard JavaScript date format, at which the current image tag was pushed to the repository.

" + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

The media type of the image manifest.

" + }, + "artifactMediaType":{ + "shape":"MediaType", + "documentation":"

The artifact media type of the image.

" + } + }, + "documentation":"

An object that describes the image tag details returned by a DescribeImageTags action.

" + }, + "ReferencedImagesNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The manifest list is referencing an image that does not exist.

", + "exception":true + }, + "Registry":{ + "type":"structure", + "required":[ + "registryId", + "registryArn", + "registryUri", + "verified", + "aliases" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the registry. If you do not specify a registry, the default public registry is assumed.

" + }, + "registryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the public registry.

" + }, + "registryUri":{ + "shape":"Url", + "documentation":"

The URI of a public registry. The URI contains a universal prefix and the registry alias.

" + }, + "verified":{ + "shape":"RegistryVerified", + "documentation":"

Whether the account is verified. This indicates whether the account is an AWS Marketplace vendor. If an account is verified, each public repository will receive a verified account badge on the Amazon ECR Public Gallery.

" + }, + "aliases":{ + "shape":"RegistryAliasList", + "documentation":"

An array of objects representing the aliases for a public registry.

" + } + }, + "documentation":"

The details of a public registry.

" + }, + "RegistryAlias":{ + "type":"structure", + "required":[ + "name", + "status", + "primaryRegistryAlias", + "defaultRegistryAlias" + ], + "members":{ + "name":{ + "shape":"RegistryAliasName", + "documentation":"

The name of the registry alias.

" + }, + "status":{ + "shape":"RegistryAliasStatus", + "documentation":"

The status of the registry alias.

" + }, + "primaryRegistryAlias":{ + "shape":"PrimaryRegistryAliasFlag", + "documentation":"

Whether or not the registry alias is the primary alias for the registry. If true, the alias is the primary registry alias and is displayed in both the repository URL and the image URI used in the docker pull commands on the Amazon ECR Public Gallery.

A registry alias that is not the primary registry alias can be used in the repository URI in a docker pull command.

" + }, + "defaultRegistryAlias":{ + "shape":"DefaultRegistryAliasFlag", + "documentation":"

Whether or not the registry alias is the default alias for the registry. When the first public repository is created, your public registry is assigned a default registry alias.

" + } + }, + "documentation":"

An object representing the aliases for a public registry. A public registry is given an alias upon creation but a custom alias can be set using the Amazon ECR console. For more information, see Registries in the Amazon Elastic Container Registry User Guide.

" + }, + "RegistryAliasList":{ + "type":"list", + "member":{"shape":"RegistryAlias"} + }, + "RegistryAliasName":{ + "type":"string", + "max":50, + "min":2, + "pattern":"[a-z][a-z0-9]+(?:[._-][a-z0-9]+)*" + }, + "RegistryAliasStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "PENDING", + "REJECTED" + ] + }, + "RegistryCatalogData":{ + "type":"structure", + "members":{ + "displayName":{ + "shape":"RegistryDisplayName", + "documentation":"

The display name for a public registry. This appears on the Amazon ECR Public Gallery.

Only accounts that have the verified account badge can have a registry display name.

" + } + }, + "documentation":"

The metadata for a public registry.

" + }, + "RegistryDisplayName":{ + "type":"string", + "max":100, + "min":0 + }, + "RegistryId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "RegistryIdOrAlias":{ + "type":"string", + "max":256, + "min":1 + }, + "RegistryList":{ + "type":"list", + "member":{"shape":"Registry"} + }, + "RegistryNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The registry does not exist.

", + "exception":true + }, + "RegistryVerified":{"type":"boolean"}, + "Repository":{ + "type":"structure", + "members":{ + "repositoryArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the repository. The ARN contains the arn:aws:ecr namespace, followed by the region of the repository, AWS account ID of the repository owner, repository namespace, and repository name. For example, arn:aws:ecr:region:012345678910:repository/test.

" + }, + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the public registry that contains the repository.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "repositoryUri":{ + "shape":"Url", + "documentation":"

The URI for the repository. You can use this URI for container image push and pull operations.

" + }, + "createdAt":{ + "shape":"CreationTimestamp", + "documentation":"

The date and time, in JavaScript date format, when the repository was created.

" + } + }, + "documentation":"

An object representing a repository.

" + }, + "RepositoryAlreadyExistsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified repository already exists in the specified registry.

", + "exception":true + }, + "RepositoryCatalogData":{ + "type":"structure", + "members":{ + "description":{ + "shape":"RepositoryDescription", + "documentation":"

The short description of the repository.

" + }, + "architectures":{ + "shape":"ArchitectureList", + "documentation":"

The architecture tags that are associated with the repository.

Only supported architecture tags appear publicly in the Amazon ECR Public Gallery. For more information, see RepositoryCatalogDataInput.

" + }, + "operatingSystems":{ + "shape":"OperatingSystemList", + "documentation":"

The operating system tags that are associated with the repository.

Only supported operating system tags appear publicly in the Amazon ECR Public Gallery. For more information, see RepositoryCatalogDataInput.

" + }, + "logoUrl":{ + "shape":"ResourceUrl", + "documentation":"

The URL containing the logo associated with the repository.

" + }, + "aboutText":{ + "shape":"AboutText", + "documentation":"

The longform description of the contents of the repository. This text appears in the repository details on the Amazon ECR Public Gallery.

" + }, + "usageText":{ + "shape":"UsageText", + "documentation":"

The longform usage details of the contents of the repository. The usage text provides context for users of the repository.

" + }, + "marketplaceCertified":{ + "shape":"MarketplaceCertified", + "documentation":"

Whether or not the repository is certified by AWS Marketplace.

" + } + }, + "documentation":"

The catalog data for a repository. This data is publicly visible in the Amazon ECR Public Gallery.

" + }, + "RepositoryCatalogDataInput":{ + "type":"structure", + "members":{ + "description":{ + "shape":"RepositoryDescription", + "documentation":"

A short description of the contents of the repository. This text appears in both the image details and also when searching for repositories on the Amazon ECR Public Gallery.

" + }, + "architectures":{ + "shape":"ArchitectureList", + "documentation":"

The system architecture that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported architectures will appear as badges on the repository and are used as search filters.

  • ARM

  • ARM 64

  • x86

  • x86-64

If an unsupported tag is added to your repository catalog data, it will be associated with the repository and can be retrieved using the API but will not be discoverable in the Amazon ECR Public Gallery.

" + }, + "operatingSystems":{ + "shape":"OperatingSystemList", + "documentation":"

The operating systems that the images in the repository are compatible with. On the Amazon ECR Public Gallery, the following supported operating systems will appear as badges on the repository and are used as search filters.

  • Linux

  • Windows

If an unsupported tag is added to your repository catalog data, it will be associated with the repository and can be retrieved using the API but will not be discoverable in the Amazon ECR Public Gallery.
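For illustration, the architecture and operating system tags listed above might be applied to an existing repository with a sketch like the following; the repository name and tag values chosen are placeholders.

    // Sketch: updating catalog data with architecture and operating system badges.
    import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
    import software.amazon.awssdk.services.ecrpublic.model.RepositoryCatalogDataInput;

    public class PutRepositoryCatalogDataExample {
        public static void main(String[] args) {
            try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
                ecrPublic.putRepositoryCatalogData(r -> r
                        .repositoryName("project-a/nginx-web-app")
                        .catalogData(RepositoryCatalogDataInput.builder()
                                .architectures("ARM 64", "x86-64")   // supported values appear as badges
                                .operatingSystems("Linux")
                                .description("Example web application image")
                                .build()));
            }
        }
    }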

" + }, + "logoImageBlob":{ + "shape":"LogoImageBlob", + "documentation":"

The base64-encoded repository logo payload.

The repository logo is only publicly visible in the Amazon ECR Public Gallery for verified accounts.

" + }, + "aboutText":{ + "shape":"AboutText", + "documentation":"

A detailed description of the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The text must be in markdown format.

" + }, + "usageText":{ + "shape":"UsageText", + "documentation":"

Detailed information on how to use the contents of the repository. It is publicly visible in the Amazon ECR Public Gallery. The usage text provides context, support information, and additional usage details for users of the repository. The text must be in markdown format.

" + } + }, + "documentation":"

An object containing the catalog data for a repository. This data is publicly visible in the Amazon ECR Public Gallery.

" + }, + "RepositoryDescription":{ + "type":"string", + "max":1024 + }, + "RepositoryList":{ + "type":"list", + "member":{"shape":"Repository"} + }, + "RepositoryName":{ + "type":"string", + "max":205, + "min":2, + "pattern":"(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*" + }, + "RepositoryNameList":{ + "type":"list", + "member":{"shape":"RepositoryName"}, + "max":100, + "min":1 + }, + "RepositoryNotEmptyException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified repository contains images. To delete a repository that contains images, you must force the deletion with the force parameter.

", + "exception":true + }, + "RepositoryNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified repository could not be found. Check the spelling of the specified repository and ensure that you are performing operations on the correct registry.

", + "exception":true + }, + "RepositoryPolicyNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified repository and registry combination does not have an associated repository policy.

", + "exception":true + }, + "RepositoryPolicyText":{ + "type":"string", + "max":10240, + "min":0 + }, + "ResourceUrl":{ + "type":"string", + "max":2048 + }, + "ServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

These errors are usually caused by a server-side issue.

", + "exception":true, + "fault":true + }, + "SetRepositoryPolicyRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "policyText" + ], + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The AWS account ID associated with the registry that contains the repository. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to receive the policy.

" + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

The JSON repository policy text to apply to the repository. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" + }, + "force":{ + "shape":"ForceFlag", + "documentation":"

If the policy that you are attempting to set on a repository would prevent you from setting another policy in the future, you must force the SetRepositoryPolicy operation. This is intended to prevent accidental repository lockouts.
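A hedged sketch of applying a repository policy follows; the policy document, principal account, and action names (built from the operation names in this model with the ecr-public prefix) are illustrative only.

    // Sketch: granting one account permission to push images to a public repository.
    import software.amazon.awssdk.services.ecrpublic.EcrPublicClient;
    import software.amazon.awssdk.services.ecrpublic.model.SetRepositoryPolicyResponse;

    public class SetRepositoryPolicyExample {
        public static void main(String[] args) {
            String policyText = "{"
                    + "\"Version\":\"2012-10-17\","
                    + "\"Statement\":[{"
                    + "\"Sid\":\"AllowPush\",\"Effect\":\"Allow\","
                    + "\"Principal\":{\"AWS\":\"arn:aws:iam::012345678910:root\"},"   // placeholder account
                    + "\"Action\":[\"ecr-public:BatchCheckLayerAvailability\","
                    + "\"ecr-public:InitiateLayerUpload\",\"ecr-public:UploadLayerPart\","
                    + "\"ecr-public:CompleteLayerUpload\",\"ecr-public:PutImage\"]"
                    + "}]}";
            try (EcrPublicClient ecrPublic = EcrPublicClient.create()) {
                SetRepositoryPolicyResponse response = ecrPublic.setRepositoryPolicy(r -> r
                        .repositoryName("project-a/nginx-web-app")
                        .policyText(policyText)
                        .force(false));   // do not bypass the lockout safety check
                System.out.println(response.policyText());
            }
        }
    }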

" + } + } + }, + "SetRepositoryPolicyResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name associated with the request.

" + }, + "policyText":{ + "shape":"RepositoryPolicyText", + "documentation":"

The JSON repository policy text applied to the repository.

" + } + } + }, + "UnsupportedCommandException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The action is not supported in this Region.

", + "exception":true + }, + "UploadId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "UploadLayerPartRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "uploadId", + "partFirstByte", + "partLastByte", + "layerPartBlob" + ], + "members":{ + "registryId":{ + "shape":"RegistryIdOrAlias", + "documentation":"

The AWS account ID associated with the registry to which you are uploading layer parts. If you do not specify a registry, the default public registry is assumed.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to which you are uploading layer parts.

" + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

The upload ID from a previous InitiateLayerUpload operation to associate with the layer part upload.

" + }, + "partFirstByte":{ + "shape":"PartSize", + "documentation":"

The position of the first byte of the layer part within the overall image layer.

" + }, + "partLastByte":{ + "shape":"PartSize", + "documentation":"

The position of the last byte of the layer part within the overall image layer.

" + }, + "layerPartBlob":{ + "shape":"LayerPartBlob", + "documentation":"

The base64-encoded layer part payload.

" + } + } + }, + "UploadLayerPartResponse":{ + "type":"structure", + "members":{ + "registryId":{ + "shape":"RegistryId", + "documentation":"

The registry ID associated with the request.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name associated with the request.

" + }, + "uploadId":{ + "shape":"UploadId", + "documentation":"

The upload ID associated with the request.

" + }, + "lastByteReceived":{ + "shape":"PartSize", + "documentation":"

The integer value of the last byte received in the request.

" + } + } + }, + "UploadNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The upload could not be found, or the specified upload ID is not valid for this repository.

", + "exception":true + }, + "Url":{"type":"string"}, + "UsageText":{ + "type":"string", + "max":10240 + } + }, + "documentation":"Amazon Elastic Container Registry Public

Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Amazon ECR provides both public and private registries to host your container images. You can use the familiar Docker CLI, or your preferred client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR supports public repositories with this API. For information about the Amazon ECR API for private repositories, see Amazon Elastic Container Registry API Reference.

" +} diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 5b08c2d54041..fc98735e8628 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index e2af225f7aaa..a9b8b85a577d 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -715,6 +715,21 @@ ], "documentation":"

Deletes specified tags from a resource.

" }, + "UpdateCapacityProvider":{ + "name":"UpdateCapacityProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateCapacityProviderRequest"}, + "output":{"shape":"UpdateCapacityProviderResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Modifies the parameters for a capacity provider.

" + }, "UpdateClusterSettings":{ "name":"UpdateClusterSettings", "http":{ @@ -914,11 +929,11 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of the attribute. Up to 128 letters (uppercase and lowercase), numbers, hyphens, underscores, and periods are allowed.

" + "documentation":"

The name of the attribute. The name must contain between 1 and 128 characters and may contain letters (uppercase and lowercase), numbers, hyphens, underscores, forward slashes, back slashes, or periods.

" }, "value":{ "shape":"String", - "documentation":"

The value of the attribute. Up to 128 letters (uppercase and lowercase), numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons, and spaces are allowed.

" + "documentation":"

The value of the attribute. The value must contain between 1 and 128 characters and may contain letters (uppercase and lowercase), numbers, hyphens, underscores, periods, at signs (@), forward slashes, back slashes, colons, or spaces. The value cannot contain any leading or trailing whitespace.

" }, "targetType":{ "shape":"TargetType", @@ -961,17 +976,28 @@ }, "documentation":"

The details of the Auto Scaling group for the capacity provider.

" }, + "AutoScalingGroupProviderUpdate":{ + "type":"structure", + "members":{ + "managedScaling":{"shape":"ManagedScaling"}, + "managedTerminationProtection":{ + "shape":"ManagedTerminationProtection", + "documentation":"

The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.

When using managed termination protection, managed scaling must also be used; otherwise, managed termination protection will not work.

When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see Instance Protection in the AWS Auto Scaling User Guide.

When managed termination protection is disabled, your Amazon EC2 instances are not protected from termination when the Auto Scaling group scales in.

" + } + }, + "documentation":"

The details of the Auto Scaling group capacity provider to update.
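A minimal sketch of this update follows, assuming the UpdateCapacityProviderRequest takes the capacity provider name plus this Auto Scaling group update and that the ManagedScalingStatus and ManagedTerminationProtection enums expose ENABLED values; the capacity provider name is a placeholder.

    // Sketch: enabling managed termination protection together with managed scaling,
    // since the former only takes effect when the latter is in use.
    import software.amazon.awssdk.services.ecs.EcsClient;
    import software.amazon.awssdk.services.ecs.model.AutoScalingGroupProviderUpdate;
    import software.amazon.awssdk.services.ecs.model.ManagedScaling;
    import software.amazon.awssdk.services.ecs.model.ManagedScalingStatus;
    import software.amazon.awssdk.services.ecs.model.ManagedTerminationProtection;

    public class UpdateCapacityProviderExample {
        public static void main(String[] args) {
            try (EcsClient ecs = EcsClient.create()) {
                ecs.updateCapacityProvider(r -> r
                        .name("my-capacity-provider")   // placeholder capacity provider name
                        .autoScalingGroupProvider(AutoScalingGroupProviderUpdate.builder()
                                .managedScaling(ManagedScaling.builder()
                                        .status(ManagedScalingStatus.ENABLED)
                                        .targetCapacity(100)
                                        .build())
                                .managedTerminationProtection(ManagedTerminationProtection.ENABLED)
                                .build()));
            }
        }
    }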

" + }, "AwsVpcConfiguration":{ "type":"structure", "required":["subnets"], "members":{ "subnets":{ "shape":"StringList", - "documentation":"

The subnets associated with the task or service. There is a limit of 16 subnets that can be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

" + "documentation":"

The IDs of the subnets associated with the task or service. There is a limit of 16 subnets that can be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

" }, "securityGroups":{ "shape":"StringList", - "documentation":"

The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of 5 security groups that can be specified per AwsVpcConfiguration.

All specified security groups must be from the same VPC.

" + "documentation":"

The IDs of the security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of 5 security groups that can be specified per AwsVpcConfiguration.

All specified security groups must be from the same VPC.
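As a sketch of a configuration that stays within the limits above (at most 16 subnets and 5 security groups, all from the same VPC), an awsvpc network configuration could be built roughly as follows; the subnet and security group IDs are placeholders.

    // Sketch: building an awsvpc NetworkConfiguration for a task or service.
    import software.amazon.awssdk.services.ecs.model.AssignPublicIp;
    import software.amazon.awssdk.services.ecs.model.AwsVpcConfiguration;
    import software.amazon.awssdk.services.ecs.model.NetworkConfiguration;

    public class AwsVpcConfigurationExample {
        public static void main(String[] args) {
            NetworkConfiguration networkConfiguration = NetworkConfiguration.builder()
                    .awsvpcConfiguration(AwsVpcConfiguration.builder()
                            .subnets("subnet-0123456789abcdef0", "subnet-0fedcba9876543210") // placeholders
                            .securityGroups("sg-0123456789abcdef0")                           // placeholder
                            .assignPublicIp(AssignPublicIp.DISABLED)
                            .build())
                    .build();
            // The configuration is then passed to RunTask, CreateService, or UpdateService.
            System.out.println(networkConfiguration);
        }
    }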

" }, "assignPublicIp":{ "shape":"AssignPublicIp", @@ -1083,7 +1109,10 @@ "enum":[ "DELETE_IN_PROGRESS", "DELETE_COMPLETE", - "DELETE_FAILED" + "DELETE_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_FAILED" ] }, "CapacityProviders":{ @@ -1329,11 +1358,11 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

" + "documentation":"

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

" }, "image":{ "shape":"String", - "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

  • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image are not propagated to already running tasks.

  • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" + "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

  • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image are not propagated to already running tasks.

  • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

  • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

  • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

  • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

" }, "repositoryCredentials":{ "shape":"RepositoryCredentials", @@ -1341,23 +1370,23 @@ }, "cpu":{ "shape":"Integer", - "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

" + "documentation":"

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

  • Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.

  • Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition. A null or zero CPU value is passed to Docker as 0, which Windows interprets as 1% of one CPU.

" }, "memory":{ "shape":"BoxedInteger", - "documentation":"

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" }, "memoryReservation":{ "shape":"BoxedInteger", - "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" + "documentation":"

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

" }, "links":{ "shape":"StringList", - "documentation":"

The links parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. For more information about linking Docker containers, go to Legacy container links in the Docker documentation. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run.

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" + "documentation":"

The links parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. For more information about linking Docker containers, go to Legacy container links in the Docker documentation. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

" }, "portMappings":{ "shape":"PortMappingList", - "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" + "documentation":"

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" }, "essential":{ "shape":"BoxedBoolean", @@ -1365,27 +1394,27 @@ }, "entryPoint":{ "shape":"StringList", - "documentation":"

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" + "documentation":"

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

" }, "command":{ "shape":"StringList", - "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each argument should be a separated string in the array.

" + "documentation":"

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each argument should be a separate string in the array.

" }, "environment":{ "shape":"EnvironmentVariables", - "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

" + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

" }, "environmentFiles":{ "shape":"EnvironmentFiles", - "documentation":"

A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file option to docker run.

You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

This field is not valid for containers in tasks using the Fargate launch type.

" + "documentation":"

A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file option to docker run.

You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

This field is not valid for containers in tasks using the Fargate launch type.

" }, "mountPoints":{ "shape":"MountPointList", - "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives.

" + "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount points cannot span drives.

" }, "volumesFrom":{ "shape":"VolumeFromList", - "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" + "documentation":"

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" }, "linuxParameters":{ "shape":"LinuxParameters", @@ -1401,7 +1430,7 @@ }, "startTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED state.

For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later. If this parameter is not specified, the default value of 3 minutes is used.

For tasks using the EC2 launch type, if the startTimeout parameter is not specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_START_TIMEOUT is used by default. If neither the startTimeout parameter or the ECS_CONTAINER_START_TIMEOUT agent configuration variable are set, then the default values of 3 minutes for Linux containers and 8 minutes on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, suppose you specify two containers in a task definition, with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time, then containerA gives up and does not start. This results in the task transitioning to a STOPPED state.

When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it is enforced independently of this start timeout value.

For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later.

For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" }, "stopTimeout":{ "shape":"BoxedInteger", @@ -1409,71 +1438,71 @@ }, "hostname":{ "shape":"String", - "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if you are using the awsvpc network mode.

" + "documentation":"

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if you are using the awsvpc network mode.

" }, "user":{ "shape":"String", - "documentation":"

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

You can use the following formats. If specifying a UID or GID, you must specify it as a positive integer.

  • user

  • user:group

  • uid

  • uid:gid

  • user:gid

  • uid:group

This parameter is not supported for Windows containers.

" + "documentation":"

The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

When running tasks using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

You can specify the user using the following formats. If specifying a UID or GID, you must specify it as a positive integer.

  • user

  • user:group

  • uid

  • uid:gid

  • user:gid

  • uid:group

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" }, "workingDirectory":{ "shape":"String", - "documentation":"

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

" + "documentation":"

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

" }, "disableNetworking":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" }, "privileged":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

" }, "readonlyRootFilesystem":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" }, "dnsServers":{ "shape":"StringList", - "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" }, "dnsSearchDomains":{ "shape":"StringList", - "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

" + "documentation":"

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" }, "extraHosts":{ "shape":"HostEntryList", - "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" + "documentation":"

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" }, "dockerSecurityOptions":{ "shape":"StringList", - "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

With Windows containers, this parameter can be used to reference a credential spec file when configuring a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers in the Amazon Elastic Container Service Developer Guide.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

With Windows containers, this parameter can be used to reference a credential spec file when configuring a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers in the Amazon Elastic Container Service Developer Guide.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

For more information about valid values, see Docker Run Security Configuration.

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"

" }, "interactive":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, this allows you to deploy containerized applications that require stdin or a tty to be allocated. This parameter maps to OpenStdin in the Create a container section of the Docker Remote API and the --interactive option to docker run.

" + "documentation":"

When this parameter is true, you can deploy containerized applications that require stdin or a tty to be allocated. This parameter maps to OpenStdin in the Create a container section of the Docker Remote API and the --interactive option to docker run.

" }, "pseudoTerminal":{ "shape":"BoxedBoolean", - "documentation":"

When this parameter is true, a TTY is allocated. This parameter maps to Tty in the Create a container section of the Docker Remote API and the --tty option to docker run.

" + "documentation":"

When this parameter is true, a TTY is allocated. This parameter maps to Tty in the Create a container section of the Docker Remote API and the --tty option to docker run.

" }, "dockerLabels":{ "shape":"DockerLabelsMap", - "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "documentation":"

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" }, "ulimits":{ "shape":"UlimitList", - "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it will override the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it will override the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "healthCheck":{ "shape":"HealthCheck", - "documentation":"

The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + "documentation":"

The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" }, "systemControls":{ "shape":"SystemControls", - "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. For tasks that use the awsvpc network mode, the container that is started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.

" + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

We do not recommend specifying network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode. For tasks that use the awsvpc network mode, the container that is started last determines which systemControls parameters take effect. For tasks that use the host network mode, the systemControls parameters change the container instance's namespaced kernel parameters as well as those of the containers.

" }, "resourceRequirements":{ "shape":"ResourceRequirements", @@ -1507,7 +1536,7 @@ }, "condition":{ "shape":"ContainerCondition", - "documentation":"

The dependency condition of the container. The following are the available conditions and their behavior:

  • START - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.

  • COMPLETE - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit.

  • SUCCESS - This condition is the same as COMPLETE, but it also requires that the container exits with a zero status.

  • HEALTHY - This condition validates that the dependent container passes its Docker health check before permitting other containers to start. This requires that the dependent container has health checks configured. This condition is confirmed only at task startup.

" + "documentation":"

The dependency condition of the container. The following are the available conditions and their behavior:

  • START - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.

  • COMPLETE - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition cannot be set on an essential container.

  • SUCCESS - This condition is the same as COMPLETE, but it also requires that the container exits with a zero status. This condition cannot be set on an essential container.

  • HEALTHY - This condition validates that the dependent container passes its Docker health check before permitting other containers to start. This requires that the dependent container has health checks configured. This condition is confirmed only at task startup.

" } }, "documentation":"

The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.

Your Amazon ECS container instances require at least version 1.26.0 of the container agent to enable container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later.

" @@ -1770,11 +1799,11 @@ }, "taskDefinition":{ "shape":"String", - "documentation":"

The family and revision (family:revision) or full ARN of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.

A task definition must be specified if the service is using the ECS deployment controller.

" + "documentation":"

The family and revision (family:revision) or full ARN of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.

A task definition must be specified if the service is using either the ECS or CODE_DEPLOY deployment controllers.

" }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" + "documentation":"

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. The load balancer name parameter must be omitted. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. The target group ARN parameter must be omitted. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

" }, "serviceRegistries":{ "shape":"ServiceRegistries", @@ -2084,6 +2113,10 @@ "shape":"Integer", "documentation":"

The number of tasks in the deployment that are in the RUNNING status.

" }, + "failedTasks":{ + "shape":"Integer", + "documentation":"

The number of consecutively failed tasks in the deployment. A task is considered a failure if the service scheduler can't launch the task, if the task doesn't transition to a RUNNING state, or if it fails any of its defined health checks and is stopped.

Once a service deployment has one or more successfully running tasks, the failed task count resets to zero and stops being evaluated.

" + }, "createdAt":{ "shape":"Timestamp", "documentation":"

The Unix timestamp for when the service deployment was created.

" @@ -2107,13 +2140,43 @@ "networkConfiguration":{ "shape":"NetworkConfiguration", "documentation":"

The VPC subnet and security group configuration for tasks that receive their own elastic network interface by using the awsvpc networking mode.

" + }, + "rolloutState":{ + "shape":"DeploymentRolloutState", + "documentation":"

The rolloutState of a deployment is only returned for services that use the rolling update (ECS) deployment type and are not behind a Classic Load Balancer.

The rollout state of the deployment. When a service deployment is started, it begins in an IN_PROGRESS state. When the service reaches a steady state, the deployment will transition to a COMPLETED state. If the service fails to reach a steady state and circuit breaker is enabled, the deployment will transition to a FAILED state. A deployment in FAILED state will launch no new tasks. For more information, see DeploymentCircuitBreaker.

" + }, + "rolloutStateReason":{ + "shape":"String", + "documentation":"

A description of the rollout state of a deployment.

" } }, "documentation":"

The details of an Amazon ECS service deployment. This is used only when a service uses the ECS deployment controller type.

" }, + "DeploymentCircuitBreaker":{ + "type":"structure", + "required":[ + "enable", + "rollback" + ], + "members":{ + "enable":{ + "shape":"Boolean", + "documentation":"

Whether to enable the deployment circuit breaker logic for the service.

" + }, + "rollback":{ + "shape":"Boolean", + "documentation":"

Whether to enable Amazon ECS to roll back the service if a service deployment fails. If rollback is enabled, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.

" + } + }, + "documentation":"

The deployment circuit breaker can only be used for services using the rolling update (ECS) deployment type that are not behind a Classic Load Balancer.

The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. If enabled, a service deployment will transition to a failed state and stop launching new tasks. You can also enable Amazon ECS to roll back your service to the last completed deployment after a failure. For more information, see Rolling update in the Amazon Elastic Container Service Developer Guide.

" + }, "DeploymentConfiguration":{ "type":"structure", "members":{ + "deploymentCircuitBreaker":{ + "shape":"DeploymentCircuitBreaker", + "documentation":"

The deployment circuit breaker can only be used for services using the rolling update (ECS) deployment type.

The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. If deployment circuit breaker is enabled, a service deployment will transition to a failed state and stop launching new tasks. If rollback is enabled, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.

" + }, "maximumPercent":{ "shape":"BoxedInteger", "documentation":"

If a service is using the rolling update (ECS) deployment type, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and tasks that use the EC2 launch type, the maximum percent value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.

" @@ -2144,6 +2207,14 @@ "EXTERNAL" ] }, + "DeploymentRolloutState":{ + "type":"string", + "enum":[ + "COMPLETED", + "FAILED", + "IN_PROGRESS" + ] + }, "Deployments":{ "type":"list", "member":{"shape":"Deployment"} @@ -2522,7 +2593,7 @@ "members":{ "accessPointId":{ "shape":"String", - "documentation":"

The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration will be relative to the directory set for the access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS Access Points in the Amazon Elastic File System User Guide.

" + "documentation":"

The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration must either be omitted or set to / which will enforce the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS Access Points in the Amazon Elastic File System User Guide.

" }, "iam":{ "shape":"EFSAuthorizationConfigIAM", @@ -2555,7 +2626,7 @@ }, "rootDirectory":{ "shape":"String", - "documentation":"

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter.

" + "documentation":"

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter.

If an EFS access point is specified in the authorizationConfig, the root directory parameter must either be omitted or set to / which will enforce the path set on the EFS access point.
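A short sketch, using the AWS SDK for Java v2 model classes, of a task definition volume that mounts an EFS file system through an access point; the file system and access point IDs are placeholders. Because an access point is used, rootDirectory is omitted and transit encryption is enabled, as required above.

import software.amazon.awssdk.services.ecs.model.EFSAuthorizationConfig;
import software.amazon.awssdk.services.ecs.model.EFSAuthorizationConfigIAM;
import software.amazon.awssdk.services.ecs.model.EFSTransitEncryption;
import software.amazon.awssdk.services.ecs.model.EFSVolumeConfiguration;
import software.amazon.awssdk.services.ecs.model.Volume;

public class EfsVolumeExample {
    public static void main(String[] args) {
        Volume volume = Volume.builder()
                .name("app-data")
                .efsVolumeConfiguration(EFSVolumeConfiguration.builder()
                        .fileSystemId("fs-12345678")                     // placeholder file system ID
                        // rootDirectory is omitted: the path configured on the access point is enforced
                        .transitEncryption(EFSTransitEncryption.ENABLED) // required when an access point is used
                        .authorizationConfig(EFSAuthorizationConfig.builder()
                                .accessPointId("fsap-0123456789abcdef0") // placeholder access point ID
                                .iam(EFSAuthorizationConfigIAM.ENABLED)
                                .build())
                        .build())
                .build();
        System.out.println(volume);
    }
}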

" }, "transitEncryption":{ "shape":"EFSTransitEncryption", @@ -2602,6 +2673,47 @@ "type":"list", "member":{"shape":"KeyValuePair"} }, + "FSxWindowsFileServerAuthorizationConfig":{ + "type":"structure", + "required":[ + "credentialsParameter", + "domain" + ], + "members":{ + "credentialsParameter":{ + "shape":"String", + "documentation":"

The authorization credential option to use. The authorization credentials can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or an AWS Systems Manager Parameter Store parameter. The ARN refers to the stored credentials.

" + }, + "domain":{ + "shape":"String", + "documentation":"

A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.

" + } + }, + "documentation":"

The authorization configuration details for the Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon Elastic Container Service API Reference.

For more information and the input format, see Amazon FSx for Windows File Server Volumes in the Amazon Elastic Container Service Developer Guide.

" + }, + "FSxWindowsFileServerVolumeConfiguration":{ + "type":"structure", + "required":[ + "fileSystemId", + "rootDirectory", + "authorizationConfig" + ], + "members":{ + "fileSystemId":{ + "shape":"String", + "documentation":"

The Amazon FSx for Windows File Server file system ID to use.

" + }, + "rootDirectory":{ + "shape":"String", + "documentation":"

The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host.

" + }, + "authorizationConfig":{ + "shape":"FSxWindowsFileServerAuthorizationConfig", + "documentation":"

The authorization configuration details for the Amazon FSx for Windows File Server file system.

" + } + }, + "documentation":"

This parameter is specified when you are using an Amazon FSx for Windows File Server file system for task storage.

For more information and the input format, see Amazon FSx for Windows File Server Volumes in the Amazon Elastic Container Service Developer Guide.
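As a hedged sketch of how the new FSxWindowsFileServerVolumeConfiguration shape is populated through the generated AWS SDK for Java v2 builders, the file system ID, share name, secret ARN, and Active Directory domain below are all placeholders.

import software.amazon.awssdk.services.ecs.model.FSxWindowsFileServerAuthorizationConfig;
import software.amazon.awssdk.services.ecs.model.FSxWindowsFileServerVolumeConfiguration;
import software.amazon.awssdk.services.ecs.model.Volume;

public class FsxVolumeExample {
    public static void main(String[] args) {
        Volume volume = Volume.builder()
                .name("fsx-share")
                .fsxWindowsFileServerVolumeConfiguration(FSxWindowsFileServerVolumeConfiguration.builder()
                        .fileSystemId("fs-0123456789abcdef0")  // placeholder FSx file system ID
                        .rootDirectory("share")                // directory to mount from the file system
                        .authorizationConfig(FSxWindowsFileServerAuthorizationConfig.builder()
                                // placeholder Secrets Manager secret ARN holding the AD credentials
                                .credentialsParameter("arn:aws:secretsmanager:us-east-1:111122223333:secret:fsx-credentials")
                                .domain("corp.example.com")    // placeholder AD domain
                                .build())
                        .build())
                .build();
        System.out.println(volume);
    }
}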

" + }, "Failure":{ "type":"structure", "members":{ @@ -2618,7 +2730,7 @@ "documentation":"

The details of the failure.

" } }, - "documentation":"

A failed resource.

" + "documentation":"

A failed resource. For a list of common causes, see API failure reasons in the Amazon Elastic Container Service Developer Guide.

" }, "Failures":{ "type":"list", @@ -2783,11 +2895,11 @@ "members":{ "add":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

The SYS_PTRACE capability is supported for tasks that use the Fargate launch type if they are also using platform version 1.4.0. The other capabilities are not supported for any platform versions.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

Tasks launched on AWS Fargate only support adding the SYS_PTRACE kernel capability.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" }, "drop":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" } }, "documentation":"

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. For more information on the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information on these Linux capabilities, see the capabilities(7) Linux manual page.
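A small sketch of the corresponding AWS SDK for Java v2 builders: it adds SYS_PTRACE (the one capability Fargate tasks may add, per the note above) and drops two capabilities the container does not need; the dropped values are illustrative.

import software.amazon.awssdk.services.ecs.model.KernelCapabilities;
import software.amazon.awssdk.services.ecs.model.LinuxParameters;

public class CapabilitiesExample {
    public static void main(String[] args) {
        LinuxParameters linuxParameters = LinuxParameters.builder()
                .capabilities(KernelCapabilities.builder()
                        .add("SYS_PTRACE")         // permitted on Fargate platform version 1.4.0
                        .drop("NET_RAW", "MKNOD")  // illustrative capabilities to remove
                        .build())
                .build();
        System.out.println(linuxParameters);
    }
}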

" @@ -2829,27 +2941,27 @@ }, "devices":{ "shape":"DevicesList", - "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" + "documentation":"

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" }, "initProcessEnabled":{ "shape":"BoxedBoolean", - "documentation":"

Run an init process inside the container that forwards signals and reaps processes. This parameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "documentation":"

Run an init process inside the container that forwards signals and reaps processes. This parameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" }, "sharedMemorySize":{ "shape":"BoxedInteger", - "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

" + "documentation":"

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

" }, "tmpfs":{ "shape":"TmpfsList", - "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

" + "documentation":"

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

" }, "maxSwap":{ "shape":"BoxedInteger", - "documentation":"

The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the --memory-swap option to docker run where the value would be the sum of the container memory plus the maxSwap value.

If a maxSwap value of 0 is specified, the container will not use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container will use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

If you are using tasks that use the Fargate launch type, the maxSwap parameter is not supported.

" + "documentation":"

The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the --memory-swap option to docker run where the value would be the sum of the container memory plus the maxSwap value.

If a maxSwap value of 0 is specified, the container will not use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container will use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

If you are using tasks that use the Fargate launch type, the maxSwap parameter is not supported.

" }, "swappiness":{ "shape":"BoxedInteger", - "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you are using tasks that use the Fargate launch type, the swappiness parameter is not supported.

" + "documentation":"

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you are using tasks that use the Fargate launch type, the swappiness parameter is not supported.

" } }, "documentation":"

Linux-specific options that are applied to the container, such as Linux KernelCapabilities.

" @@ -3218,7 +3330,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"

The log driver to use for the container. The valid values listed earlier are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we do not currently provide support for running modified copies of this software.

" + "documentation":"

The log driver to use for the container.

For tasks on AWS Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs log driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we do not currently provide support for running modified copies of this software.
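A sketch of the awslogs case with the AWS SDK for Java v2: the log group, Region, and stream prefix are placeholders, and the option keys are the standard awslogs driver options.

import java.util.Map;
import software.amazon.awssdk.services.ecs.model.LogConfiguration;
import software.amazon.awssdk.services.ecs.model.LogDriver;

public class AwsLogsExample {
    public static void main(String[] args) {
        LogConfiguration logConfiguration = LogConfiguration.builder()
                .logDriver(LogDriver.AWSLOGS)
                .options(Map.of(
                        "awslogs-group", "/ecs/my-app",   // placeholder log group
                        "awslogs-region", "us-east-1",    // placeholder Region
                        "awslogs-stream-prefix", "web"))  // placeholder stream prefix
                .build();
        System.out.println(logConfiguration);
    }
}

The resulting LogConfiguration is set on a ContainerDefinition when registering the task definition.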

" }, "options":{ "shape":"LogConfigurationOptionsMap", @@ -3229,7 +3341,7 @@ "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run . By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

The following should be noted when specifying a log configuration for your containers:

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the valid values below). Additional log drivers may be available in future releases of the Amazon ECS container agent.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks using the EC2 launch type, the Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks using the Fargate launch type, because you do not have access to the underlying infrastructure your tasks are hosted on, any additional software needed will have to be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" + "documentation":"

The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .

By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

The following should be noted when specifying a log configuration for your containers:

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the valid values below). Additional log drivers may be available in future releases of the Amazon ECS container agent.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks on AWS Fargate, because you do not have access to the underlying infrastructure your tasks are hosted on, any additional software needed will have to be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" }, "LogConfigurationOptionsMap":{ "type":"map", @@ -3268,10 +3380,19 @@ "maximumScalingStepSize":{ "shape":"ManagedScalingStepSize", "documentation":"

The maximum number of container instances that Amazon ECS will scale in or scale out at one time. If this parameter is omitted, the default value of 10000 is used.

" + }, + "instanceWarmupPeriod":{ + "shape":"ManagedScalingInstanceWarmupPeriod", + "documentation":"

The period of time, in seconds, before a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for the Auto Scaling group. If this parameter is omitted, the default value of 300 seconds is used.

" } }, "documentation":"

The managed scaling settings for the Auto Scaling group capacity provider.

When managed scaling is enabled, Amazon ECS manages the scale-in and scale-out actions of the Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an Amazon ECS-managed CloudWatch metric with the specified targetCapacity value as the target value for the metric. For more information, see Using Managed Scaling in the Amazon Elastic Container Service Developer Guide.

If managed scaling is disabled, the user must manage the scaling of the Auto Scaling group.
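A sketch, using the AWS SDK for Java v2, of creating a capacity provider whose managed scaling uses the new instanceWarmupPeriod member; the capacity provider name and Auto Scaling group ARN are placeholders, and the scaling values are illustrative.

import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.ecs.model.AutoScalingGroupProvider;
import software.amazon.awssdk.services.ecs.model.CreateCapacityProviderRequest;
import software.amazon.awssdk.services.ecs.model.ManagedScaling;
import software.amazon.awssdk.services.ecs.model.ManagedScalingStatus;
import software.amazon.awssdk.services.ecs.model.ManagedTerminationProtection;

public class CapacityProviderExample {
    public static void main(String[] args) {
        try (EcsClient ecs = EcsClient.create()) {
            ecs.createCapacityProvider(CreateCapacityProviderRequest.builder()
                    .name("my-capacity-provider")  // placeholder capacity provider name
                    .autoScalingGroupProvider(AutoScalingGroupProvider.builder()
                            // placeholder Auto Scaling group ARN
                            .autoScalingGroupArn("arn:aws:autoscaling:us-east-1:111122223333:autoScalingGroup:a1b2c3d4-example:autoScalingGroupName/my-asg")
                            .managedScaling(ManagedScaling.builder()
                                    .status(ManagedScalingStatus.ENABLED)
                                    .targetCapacity(100)
                                    .minimumScalingStepSize(1)
                                    .maximumScalingStepSize(100)
                                    .instanceWarmupPeriod(300)  // new member added in this model
                                    .build())
                            .managedTerminationProtection(ManagedTerminationProtection.ENABLED)
                            .build())
                    .build());
        }
    }
}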

" }, + "ManagedScalingInstanceWarmupPeriod":{ + "type":"integer", + "max":10000, + "min":0 + }, "ManagedScalingStatus":{ "type":"string", "enum":[ @@ -3502,7 +3623,7 @@ "members":{ "containerPort":{ "shape":"BoxedInteger", - "documentation":"

The port number on the container that is bound to the user-specified or automatically assigned host port.

If you are using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort.

If you are using containers in a task with the bridge network mode and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range. For more information, see hostPort. Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

You cannot expose the same container port for multiple protocols. An error will be returned if this is attempted.

" + "documentation":"

The port number on the container that is bound to the user-specified or automatically assigned host port.

If you are using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort.

If you are using containers in a task with the bridge network mode and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range. For more information, see hostPort. Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

" }, "hostPort":{ "shape":"BoxedInteger", @@ -3543,7 +3664,7 @@ "documentation":"

The set of network configuration parameters to provide the Container Network Interface (CNI) plugin, specified as key-value pairs.

  • IgnoredUID - (Required) The user ID (UID) of the proxy container as defined by the user parameter in a container definition. This is used to ensure the proxy ignores its own traffic. If IgnoredGID is specified, this field can be empty.

  • IgnoredGID - (Required) The group ID (GID) of the proxy container as defined by the user parameter in a container definition. This is used to ensure the proxy ignores its own traffic. If IgnoredUID is specified, this field can be empty.

  • AppPorts - (Required) The list of ports that the application uses. Network traffic to these ports is forwarded to the ProxyIngressPort and ProxyEgressPort.

  • ProxyIngressPort - (Required) Specifies the port that incoming traffic to the AppPorts is directed to.

  • ProxyEgressPort - (Required) Specifies the port that outgoing traffic from the AppPorts is directed to.

  • EgressIgnoredPorts - (Required) The egress traffic going to the specified ports is ignored and not redirected to the ProxyEgressPort. It can be an empty list.

  • EgressIgnoredIPs - (Required) The egress traffic going to the specified IP addresses is ignored and not redirected to the ProxyEgressPort. It can be an empty list.

" } }, - "documentation":"

The configuration details for the App Mesh proxy.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the ecs-init package to enable a proxy configuration. If your container instances are launched from the Amazon ECS-optimized AMI version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks using the Fargate launch type, the task or service requires platform version 1.3.0 or later.

" + "documentation":"

The configuration details for the App Mesh proxy.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the ecs-init package to enable a proxy configuration. If your container instances are launched from the Amazon ECS-optimized AMI version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
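A minimal AWS SDK for Java v2 sketch of an App Mesh proxy configuration built from the properties documented above; the container name, UID, and port values are placeholders you would normally take from your mesh setup.

import software.amazon.awssdk.services.ecs.model.KeyValuePair;
import software.amazon.awssdk.services.ecs.model.ProxyConfiguration;
import software.amazon.awssdk.services.ecs.model.ProxyConfigurationType;

public class ProxyConfigurationExample {
    private static KeyValuePair prop(String name, String value) {
        return KeyValuePair.builder().name(name).value(value).build();
    }

    public static void main(String[] args) {
        ProxyConfiguration proxy = ProxyConfiguration.builder()
                .type(ProxyConfigurationType.APPMESH)
                .containerName("envoy")              // placeholder proxy container name
                .properties(
                        prop("IgnoredUID", "1337"),  // UID the proxy container runs as (placeholder)
                        prop("AppPorts", "8080"),    // placeholder application port
                        prop("ProxyIngressPort", "15000"),
                        prop("ProxyEgressPort", "15001"),
                        prop("EgressIgnoredIPs", "169.254.170.2,169.254.169.254"))
                .build();
        System.out.println(proxy);
    }
}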

" }, "ProxyConfigurationProperties":{ "type":"list", @@ -3728,7 +3849,7 @@ }, "networkMode":{ "shape":"NetworkMode", - "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. The default Docker network mode is bridge. If you are using the Fargate launch type, the awsvpc network mode is required. If you are using the EC2 launch type, any network mode can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the ecs-init package, or AWS Fargate infrastructure support the awsvpc network mode.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. If you use the console to register a task definition with Windows containers, you must choose the <default> network mode object.

For more information, see Network settings in the Docker run reference.

" + "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 instances, any network mode can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the ecs-init package, or AWS Fargate infrastructure support the awsvpc network mode.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. If you use the console to register a task definition with Windows containers, you must choose the <default> network mode object.

For more information, see Network settings in the Docker run reference.

" }, "containerDefinitions":{ "shape":"ContainerDefinitions", @@ -3744,7 +3865,7 @@ }, "requiresCompatibilities":{ "shape":"CompatibilityList", - "documentation":"

The launch type required by the task. If no value is specified, it defaults to EC2.

" + "documentation":"

The task launch type that Amazon ECS should validate the task definition against. This ensures that the task definition parameters are compatible with the specified launch type. If no value is specified, it defaults to EC2.
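A sketch of registering a Fargate-compatible task definition with the AWS SDK for Java v2, tying together requiresCompatibilities, the awsvpc network mode, and a valid Fargate cpu/memory pairing; the family, image, and role ARN are placeholders.

import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.ecs.model.Compatibility;
import software.amazon.awssdk.services.ecs.model.ContainerDefinition;
import software.amazon.awssdk.services.ecs.model.NetworkMode;
import software.amazon.awssdk.services.ecs.model.PortMapping;
import software.amazon.awssdk.services.ecs.model.RegisterTaskDefinitionRequest;

public class RegisterFargateTaskDefinition {
    public static void main(String[] args) {
        try (EcsClient ecs = EcsClient.create()) {
            ecs.registerTaskDefinition(RegisterTaskDefinitionRequest.builder()
                    .family("web")                                   // placeholder family name
                    .requiresCompatibilities(Compatibility.FARGATE)  // validate against the Fargate launch type
                    .networkMode(NetworkMode.AWSVPC)                 // required for Fargate tasks
                    .cpu("256")                                      // .25 vCPU, paired with 512 MiB per the documented combinations
                    .memory("512")
                    // placeholder execution role used to pull images and write logs
                    .executionRoleArn("arn:aws:iam::111122223333:role/ecsTaskExecutionRole")
                    .containerDefinitions(ContainerDefinition.builder()
                            .name("web")
                            .image("public.ecr.aws/nginx/nginx:latest")  // placeholder image
                            .essential(true)
                            .portMappings(PortMapping.builder().containerPort(80).build())
                            .build())
                    .build());
        }
    }
}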

" }, "cpu":{ "shape":"String", @@ -4504,7 +4625,7 @@ "documentation":"

The value for the namespaced kernel parameter specified in namespace.

" } }, - "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode for the following reasons:

  • For tasks that use the awsvpc network mode, if you set systemControls for any container, it applies to all containers in the task. If you set different systemControls for multiple containers in a single task, the container that is started last determines which systemControls take effect.

  • For tasks that use the host network mode, the systemControls parameter applies to the container instance's kernel parameter as well as that of all containers of any tasks running on that container instance.

" + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode for the following reasons:

  • For tasks that use the awsvpc network mode, if you set systemControls for any container, it applies to all containers in the task. If you set different systemControls for multiple containers in a single task, the container that is started last determines which systemControls take effect.

  • For tasks that use the host network mode, the systemControls parameter applies to the container instance's kernel parameter as well as that of all containers of any tasks running on that container instance.
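A minimal sketch of a SystemControl entry built with the AWS SDK for Java v2; the kernel parameter and its value are illustrative. The object is attached to a container through ContainerDefinition.builder().systemControls(...).

import software.amazon.awssdk.services.ecs.model.SystemControl;

public class SystemControlExample {
    public static void main(String[] args) {
        // Set one namespaced kernel parameter on the container (illustrative values).
        SystemControl keepAlive = SystemControl.builder()
                .namespace("net.ipv4.tcp_keepalive_time")
                .value("120")
                .build();
        System.out.println(keepAlive);
    }
}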

" }, "SystemControls":{ "type":"list", @@ -4742,7 +4863,7 @@ }, "networkMode":{ "shape":"NetworkMode", - "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. The default Docker network mode is bridge. If you are using the Fargate launch type, the awsvpc network mode is required. If you are using the EC2 launch type, any network mode can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the ecs-init package, or AWS Fargate infrastructure support the awsvpc network mode.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. If you use the console to register a task definition with Windows containers, you must choose the <default> network mode object.

For more information, see Network settings in the Docker run reference.

" + "documentation":"

The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge.

For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 instances, any network mode can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode.

With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings.

When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.

If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.

Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the ecs-init package, or AWS Fargate infrastructure support the awsvpc network mode.

If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.

Docker for Windows uses different network modes than Docker for Linux. When you register a task definition with Windows containers, you must not specify a network mode. If you use the console to register a task definition with Windows containers, you must choose the <default> network mode object.

For more information, see Network settings in the Docker run reference.

" }, "revision":{ "shape":"Integer", @@ -4778,7 +4899,7 @@ }, "memory":{ "shape":"String", - "documentation":"

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, this field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

" + "documentation":"

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, this field is optional and any value can be used; however, you must specify either a task-level memory value or a container-level memory value. If a task-level memory value is specified, then the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

  • 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)

  • 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU)

  • 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU)

  • Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU)

  • Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU)

" }, "inferenceAccelerators":{ "shape":"InferenceAccelerators", @@ -5025,7 +5146,7 @@ }, "size":{ "shape":"Integer", - "documentation":"

The size (in MiB) of the tmpfs volume.

" + "documentation":"

The maximum size (in MiB) of the tmpfs volume.

" }, "mountOptions":{ "shape":"StringList", @@ -5121,6 +5242,29 @@ "members":{ } }, + "UpdateCapacityProviderRequest":{ + "type":"structure", + "required":[ + "name", + "autoScalingGroupProvider" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the capacity provider to update.

" + }, + "autoScalingGroupProvider":{ + "shape":"AutoScalingGroupProviderUpdate", + "documentation":"

An object that represents the parameters to update for the Auto Scaling group capacity provider.
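A hedged AWS SDK for Java v2 sketch of the new UpdateCapacityProvider operation; the capacity provider name is a placeholder, and it assumes AutoScalingGroupProviderUpdate exposes a managedScaling member like its create-time counterpart (that shape's members are not shown in this hunk).

import software.amazon.awssdk.services.ecs.EcsClient;
import software.amazon.awssdk.services.ecs.model.AutoScalingGroupProviderUpdate;
import software.amazon.awssdk.services.ecs.model.ManagedScaling;
import software.amazon.awssdk.services.ecs.model.ManagedScalingStatus;
import software.amazon.awssdk.services.ecs.model.UpdateCapacityProviderRequest;

public class UpdateCapacityProviderExample {
    public static void main(String[] args) {
        try (EcsClient ecs = EcsClient.create()) {
            ecs.updateCapacityProvider(UpdateCapacityProviderRequest.builder()
                    .name("my-capacity-provider")  // the capacity provider to update (placeholder)
                    .autoScalingGroupProvider(AutoScalingGroupProviderUpdate.builder()
                            // assumed member, mirroring AutoScalingGroupProvider
                            .managedScaling(ManagedScaling.builder()
                                    .status(ManagedScalingStatus.ENABLED)
                                    .targetCapacity(90)
                                    .build())
                            .build())
                    .build());
        }
    }
}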

" + } + } + }, + "UpdateCapacityProviderResponse":{ + "type":"structure", + "members":{ + "capacityProvider":{"shape":"CapacityProvider"} + } + }, "UpdateClusterSettingsRequest":{ "type":"structure", "required":[ @@ -5362,9 +5506,13 @@ "efsVolumeConfiguration":{ "shape":"EFSVolumeConfiguration", "documentation":"

This parameter is specified when you are using an Amazon Elastic File System file system for task storage.

" + }, + "fsxWindowsFileServerVolumeConfiguration":{ + "shape":"FSxWindowsFileServerVolumeConfiguration", + "documentation":"

This parameter is specified when you are using an Amazon FSx for Windows File Server file system for task storage.

" } }, - "documentation":"

A data volume used in a task definition. For tasks that use Amazon Elastic File System (Amazon EFS) file storage, specify an efsVolumeConfiguration. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" + "documentation":"

A data volume used in a task definition. For tasks that use the Amazon Elastic File System (Amazon EFS), specify an efsVolumeConfiguration. For Windows tasks that use an Amazon FSx for Windows File Server file system, specify an fsxWindowsFileServerVolumeConfiguration. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" }, "VolumeFrom":{ "type":"structure", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index d1ebf1bb3065..081907ef0e5c 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/eks/pom.xml b/services/eks/pom.xml index 7bb60dbbddb1..f32b9ec9f02c 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/eks/src/main/resources/codegen-resources/paginators-1.json b/services/eks/src/main/resources/codegen-resources/paginators-1.json index 6d1c327dc5be..c21360409cd8 100644 --- a/services/eks/src/main/resources/codegen-resources/paginators-1.json +++ b/services/eks/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "DescribeAddonVersions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "addons" + }, + "ListAddons": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "addons" + }, "ListClusters": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/services/eks/src/main/resources/codegen-resources/service-2.json b/services/eks/src/main/resources/codegen-resources/service-2.json index 058cb92c8098..bfedc15d9127 100644 --- a/services/eks/src/main/resources/codegen-resources/service-2.json +++ b/services/eks/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,24 @@ "uid":"eks-2017-11-01" }, "operations":{ + "CreateAddon":{ + "name":"CreateAddon", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/addons" + }, + "input":{"shape":"CreateAddonRequest"}, + "output":{"shape":"CreateAddonResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"} + ], + "documentation":"

Creates an Amazon EKS add-on.

Amazon EKS add-ons help to automate the provisioning and lifecycle management of common operational software for Amazon EKS clusters. Amazon EKS add-ons can only be used with Amazon EKS clusters running version 1.18 with platform version eks.3 or later because add-ons rely on the Server-side Apply Kubernetes feature, which is only available in Kubernetes 1.18 and later.
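A minimal sketch of calling the new CreateAddon operation with the AWS SDK for Java v2; the cluster name, add-on name, add-on version, and role ARN are placeholders (the name and version should come from DescribeAddonVersions, and the role requires an OIDC provider as described further below).

import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.CreateAddonRequest;
import software.amazon.awssdk.services.eks.model.CreateAddonResponse;

public class CreateAddonExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            CreateAddonResponse response = eks.createAddon(CreateAddonRequest.builder()
                    .clusterName("my-cluster")          // placeholder cluster name
                    .addonName("vpc-cni")               // placeholder add-on name
                    .addonVersion("v1.7.5-eksbuild.1")  // placeholder version from DescribeAddonVersions
                    // optional IAM role bound to the add-on's Kubernetes service account (placeholder ARN)
                    .serviceAccountRoleArn("arn:aws:iam::111122223333:role/AmazonEKSAddonRole")
                    .build());
            System.out.println(response.addon().status());
        }
    }
}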

" + }, "CreateCluster":{ "name":"CreateCluster", "http":{ @@ -67,7 +85,24 @@ {"shape":"ServerException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates a managed worker node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Launch template support.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS-optimized Amazon Linux 2 AMI. For more information, see Managed Node Groups in the Amazon EKS User Guide.

" + "documentation":"

Creates a managed worker node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Launch template support.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed Node Groups in the Amazon EKS User Guide.

" + }, + "DeleteAddon":{ + "name":"DeleteAddon", + "http":{ + "method":"DELETE", + "requestUri":"/clusters/{name}/addons/{addonName}" + }, + "input":{"shape":"DeleteAddonRequest"}, + "output":{"shape":"DeleteAddonResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"} + ], + "documentation":"

Delete an Amazon EKS add-on.

When you remove the add-on, it will also be deleted from the cluster. You can always manually start an add-on on the cluster using the Kubernetes API.

" }, "DeleteCluster":{ "name":"DeleteCluster", @@ -120,6 +155,38 @@ ], "documentation":"

Deletes an Amazon EKS node group for a cluster.

" }, + "DescribeAddon":{ + "name":"DescribeAddon", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}/addons/{addonName}" + }, + "input":{"shape":"DescribeAddonRequest"}, + "output":{"shape":"DescribeAddonResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"} + ], + "documentation":"

Describes an Amazon EKS add-on.

" + }, + "DescribeAddonVersions":{ + "name":"DescribeAddonVersions", + "http":{ + "method":"GET", + "requestUri":"/addons/supported-versions" + }, + "input":{"shape":"DescribeAddonVersionsRequest"}, + "output":{"shape":"DescribeAddonVersionsResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Describes the Kubernetes versions that the add-on can be used with.
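A sketch of walking the new DescribeAddonVersions pagination with the AWS SDK for Java v2, assuming the generated paginator exposes the addons result key configured in paginators-1.json, as v2 paginators usually do.

import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.AddonInfo;
import software.amazon.awssdk.services.eks.model.AddonVersionInfo;
import software.amazon.awssdk.services.eks.model.DescribeAddonVersionsRequest;

public class ListAddonVersions {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            // Iterate every available add-on and its versions across all pages.
            for (AddonInfo addon : eks.describeAddonVersionsPaginator(
                    DescribeAddonVersionsRequest.builder().build()).addons()) {
                for (AddonVersionInfo version : addon.addonVersions()) {
                    System.out.println(addon.addonName() + " " + version.addonVersion());
                }
            }
        }
    }
}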

" + }, "DescribeCluster":{ "name":"DescribeCluster", "http":{ @@ -185,6 +252,23 @@ ], "documentation":"

Returns descriptive information about an update against your Amazon EKS cluster or associated managed node group.

When the status of the update is Succeeded, the update is complete. If an update fails, the status is Failed, and an error detail explains the reason for the failure.

" }, + "ListAddons":{ + "name":"ListAddons", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}/addons" + }, + "input":{"shape":"ListAddonsRequest"}, + "output":{"shape":"ListAddonsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ClientException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServerException"} + ], + "documentation":"

Lists the available add-ons.

" + }, "ListClusters":{ "name":"ListClusters", "http":{ @@ -292,6 +376,24 @@ ], "documentation":"

Deletes specified tags from a resource.

" }, + "UpdateAddon":{ + "name":"UpdateAddon", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/addons/{addonName}/update" + }, + "input":{"shape":"UpdateAddonRequest"}, + "output":{"shape":"UpdateAddonResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"} + ], + "documentation":"

Updates an Amazon EKS add-on.

" + }, "UpdateClusterConfig":{ "name":"UpdateClusterConfig", "http":{ @@ -362,7 +464,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Updates the Kubernetes version or AMI version of an Amazon EKS managed node group.

You can update a node group using a launch template only if the node group was originally deployed with a launch template. If you need to update a custom AMI in a node group that was deployed with a launch template, then update your custom AMI, specify the new ID in a new version of the launch template, and then update the node group to the new version of the launch template.

If you update without a launch template, then you can update to the latest available AMI version of a node group's current Kubernetes version by not specifying a Kubernetes version in the request. You can update to the latest AMI version of your cluster's current Kubernetes version by specifying your cluster's Kubernetes version in the request. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.

You cannot roll back a node group to an earlier Kubernetes version or AMI version.

When a node in a managed node group is terminated due to a scaling action or update, the pods in that node are drained first. Amazon EKS attempts to drain the nodes gracefully and will fail if it is unable to do so. You can force the update if Amazon EKS is unable to drain the nodes as a result of a pod disruption budget issue.

" + "documentation":"

Updates the Kubernetes version or AMI version of an Amazon EKS managed node group.

You can update a node group using a launch template only if the node group was originally deployed with a launch template. If you need to update a custom AMI in a node group that was deployed with a launch template, then update your custom AMI, specify the new ID in a new version of the launch template, and then update the node group to the new version of the launch template.

If you update without a launch template, then you can update to the latest available AMI version of a node group's current Kubernetes version by not specifying a Kubernetes version in the request. You can update to the latest AMI version of your cluster's current Kubernetes version by specifying your cluster's Kubernetes version in the request. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.

You cannot roll back a node group to an earlier Kubernetes version or AMI version.

When a node in a managed node group is terminated due to a scaling action or update, the pods in that node are drained first. Amazon EKS attempts to drain the nodes gracefully and will fail if it is unable to do so. You can force the update if Amazon EKS is unable to drain the nodes as a result of a pod disruption budget issue.

" } }, "shapes":{ @@ -374,6 +476,150 @@ "AL2_ARM_64" ] }, + "Addon":{ + "type":"structure", + "members":{ + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on.

" + }, + "clusterName":{ + "shape":"ClusterName", + "documentation":"

The name of the cluster.

" + }, + "status":{ + "shape":"AddonStatus", + "documentation":"

The status of the add-on.

" + }, + "addonVersion":{ + "shape":"String", + "documentation":"

The version of the add-on.

" + }, + "health":{ + "shape":"AddonHealth", + "documentation":"

An object that represents the health of the add-on.

" + }, + "addonArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the add-on.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the add-on was created.

" + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the add-on was last modified.

" + }, + "serviceAccountRoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that is bound to the Kubernetes service account used by the add-on.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The metadata that you apply to the add-on to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Add-on tags do not propagate to any other resources associated with the cluster.

" + } + }, + "documentation":"

An Amazon EKS add-on.

" + }, + "AddonHealth":{ + "type":"structure", + "members":{ + "issues":{ + "shape":"AddonIssueList", + "documentation":"

An object that represents the add-on's health issues.

" + } + }, + "documentation":"

The health of the add-on.

" + }, + "AddonInfo":{ + "type":"structure", + "members":{ + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on.

" + }, + "type":{ + "shape":"String", + "documentation":"

The type of the add-on.

" + }, + "addonVersions":{ + "shape":"AddonVersionInfoList", + "documentation":"

An object that represents information about available add-on versions and compatible Kubernetes versions.

" + } + }, + "documentation":"

Information about an add-on.

" + }, + "AddonIssue":{ + "type":"structure", + "members":{ + "code":{ + "shape":"AddonIssueCode", + "documentation":"

A code that describes the type of issue.

" + }, + "message":{ + "shape":"String", + "documentation":"

A message that provides details about the issue and what might cause it.

" + }, + "resourceIds":{ + "shape":"StringList", + "documentation":"

The resource IDs of the issue.

" + } + }, + "documentation":"

An issue related to an add-on.

" + }, + "AddonIssueCode":{ + "type":"string", + "enum":[ + "AccessDenied", + "InternalFailure", + "ClusterUnreachable", + "InsufficientNumberOfReplicas", + "ConfigurationConflict" + ] + }, + "AddonIssueList":{ + "type":"list", + "member":{"shape":"AddonIssue"} + }, + "AddonStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "CREATE_FAILED", + "UPDATING", + "DELETING", + "DELETE_FAILED", + "DEGRADED" + ] + }, + "AddonVersionInfo":{ + "type":"structure", + "members":{ + "addonVersion":{ + "shape":"String", + "documentation":"

The version of the add-on.

" + }, + "architecture":{ + "shape":"StringList", + "documentation":"

The architectures that the version supports.

" + }, + "compatibilities":{ + "shape":"Compatibilities", + "documentation":"

An object that represents the compatibilities of a version.

" + } + }, + "documentation":"

Information about an add-on version.

" + }, + "AddonVersionInfoList":{ + "type":"list", + "member":{"shape":"AddonVersionInfo"} + }, + "Addons":{ + "type":"list", + "member":{"shape":"AddonInfo"} + }, "AutoScalingGroup":{ "type":"structure", "members":{ @@ -411,6 +657,13 @@ "box":true, "min":1 }, + "CapacityTypes":{ + "type":"string", + "enum":[ + "ON_DEMAND", + "SPOT" + ] + }, "Certificate":{ "type":"structure", "members":{ @@ -432,6 +685,7 @@ "shape":"String", "documentation":"

The Amazon EKS managed node group associated with the exception.

" }, + "addonName":{"shape":"String"}, "message":{"shape":"String"} }, "documentation":"

These errors are usually caused by a client action. Actions can include using an action or resource on behalf of a user that doesn't have permissions to use the action or resource or specifying an identifier that is not valid.

", @@ -471,7 +725,7 @@ }, "kubernetesNetworkConfig":{ "shape":"KubernetesNetworkConfigResponse", - "documentation":"

Network configuration settings for your cluster.

" + "documentation":"

The Kubernetes network configuration for the cluster.

" }, "logging":{ "shape":"Logging", @@ -524,6 +778,74 @@ "UPDATING" ] }, + "Compatibilities":{ + "type":"list", + "member":{"shape":"Compatibility"} + }, + "Compatibility":{ + "type":"structure", + "members":{ + "clusterVersion":{ + "shape":"String", + "documentation":"

The supported Kubernetes version of the cluster.

" + }, + "platformVersions":{ + "shape":"StringList", + "documentation":"

The supported compute platform.

" + }, + "defaultVersion":{ + "shape":"Boolean", + "documentation":"

The supported default version.

" + } + }, + "documentation":"

Compatibility information.

" + }, + "CreateAddonRequest":{ + "type":"structure", + "required":[ + "clusterName", + "addonName" + ], + "members":{ + "clusterName":{ + "shape":"ClusterName", + "documentation":"

The name of the cluster to create the add-on for.

", + "location":"uri", + "locationName":"name" + }, + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on. The name must match one of the names returned by ListAddons.

" + }, + "addonVersion":{ + "shape":"String", + "documentation":"

The version of the add-on. The version must match one of the versions returned by DescribeAddonVersions.

" + }, + "serviceAccountRoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide.

To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see Enabling IAM roles for service accounts on your cluster in the Amazon EKS User Guide.

" + }, + "resolveConflicts":{ + "shape":"ResolveConflicts", + "documentation":"

How to resolve parameter value conflicts when migrating an existing add-on to an Amazon EKS add-on.

" + }, + "clientRequestToken":{ + "shape":"String", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The metadata to apply to the add-on to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

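The CreateAddonRequest and CreateAddonResponse shapes above surface in the Java SDK v2 EKS client through the usual builder-style API. A minimal sketch of installing an add-on follows; the cluster name, add-on name, version string, and role ARN are placeholder values, and the method names assume the standard codegen mapping of the members shown above.

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.CreateAddonRequest;
import software.amazon.awssdk.services.eks.model.CreateAddonResponse;
import software.amazon.awssdk.services.eks.model.ResolveConflicts;

public class CreateAddonExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {              // default region/credentials chain
            CreateAddonResponse response = eks.createAddon(CreateAddonRequest.builder()
                    .clusterName("my-cluster")                  // placeholder cluster name
                    .addonName("vpc-cni")                       // must match a name returned by ListAddons
                    .addonVersion("v1.7.5-eksbuild.1")          // placeholder version string
                    .serviceAccountRoleArn("arn:aws:iam::111122223333:role/my-addon-role") // placeholder ARN
                    .resolveConflicts(ResolveConflicts.OVERWRITE)
                    .build());
            System.out.println("Add-on status: " + response.addon().status());
        }
    }
}
```

Because clientRequestToken is flagged as an idempotencyToken in the model, it can be left unset here and the SDK supplies one for retry safety.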
" + } + } + }, + "CreateAddonResponse":{ + "type":"structure", + "members":{ + "addon":{"shape":"Addon"} + } + }, "CreateClusterRequest":{ "type":"structure", "required":[ @@ -663,11 +985,11 @@ }, "instanceTypes":{ "shape":"StringList", - "documentation":"

The instance type to use for your node group. You can specify a single instance type for a node group. The default value for instanceTypes is t3.medium. If you choose a GPU instance type, be sure to specify AL2_x86_64_GPU with the amiType parameter. If you specify launchTemplate, then don't specify instanceTypes, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

Specify the instance types for a node group. If you specify a GPU instance type, be sure to specify AL2_x86_64_GPU with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in your launch template or you can specify 0-20 instance types for instanceTypes. If, however, you specify an instance type in your launch template and specify any instanceTypes, the node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. For more information, see Managed node group capacity types and Launch template support in the Amazon EKS User Guide.

" }, "amiType":{ "shape":"AMITypes", - "documentation":"

The AMI type for your node group. GPU instance types should use the AL2_x86_64_GPU AMI type. Non-GPU instances should use the AL2_x86_64 AMI type. Arm instances should use the AL2_ARM_64 AMI type. All types use the Amazon EKS-optimized Amazon Linux 2 AMI. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The AMI type for your node group. GPU instance types should use the AL2_x86_64_GPU AMI type. Non-GPU instances should use the AL2_x86_64 AMI type. Arm instances should use the AL2_ARM_64 AMI type. All types use the Amazon EKS optimized Amazon Linux 2 AMI. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" }, "remoteAccess":{ "shape":"RemoteAccessConfig", @@ -694,13 +1016,17 @@ "shape":"LaunchTemplateSpecification", "documentation":"

An object representing a node group's launch template specification. If specified, then do not specify instanceTypes, diskSize, or remoteAccess and make sure that the launch template meets the requirements in launchTemplateSpecification.

" }, + "capacityType":{ + "shape":"CapacityTypes", + "documentation":"

The capacity type for your node group.

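The new capacityType member (backed by the CapacityTypes enum added earlier in this hunk) pairs with the multi-value instanceTypes guidance above. Below is a minimal sketch of creating a Spot-backed managed node group; the cluster, node group, subnet, and role identifiers are placeholders, and the builder methods assume the standard codegen mapping of the CreateNodegroupRequest members.

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.CapacityTypes;
import software.amazon.awssdk.services.eks.model.CreateNodegroupRequest;

public class CreateSpotNodegroupExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            eks.createNodegroup(CreateNodegroupRequest.builder()
                    .clusterName("my-cluster")                          // placeholder cluster name
                    .nodegroupName("spot-workers")                      // placeholder node group name
                    .capacityType(CapacityTypes.SPOT)                   // new in this model update
                    // Several similarly sized instance types are recommended for Spot capacity.
                    .instanceTypes("m5.large", "m5a.large", "m5d.large")
                    .subnets("subnet-0123456789abcdef0", "subnet-0fedcba9876543210") // placeholder subnet IDs
                    .nodeRole("arn:aws:iam::111122223333:role/my-node-role")         // placeholder node IAM role
                    .build());
        }
    }
}
```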
" + }, "version":{ "shape":"String", "documentation":"

The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" }, "releaseVersion":{ "shape":"String", - "documentation":"

The AMI version of the Amazon EKS-optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" } } }, @@ -713,6 +1039,33 @@ } } }, + "DeleteAddonRequest":{ + "type":"structure", + "required":[ + "clusterName", + "addonName" + ], + "members":{ + "clusterName":{ + "shape":"ClusterName", + "documentation":"

The name of the cluster to delete the add-on from.

", + "location":"uri", + "locationName":"name" + }, + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on. The name must match one of the names returned by ListAddons.

", + "location":"uri", + "locationName":"addonName" + } + } + }, + "DeleteAddonResponse":{ + "type":"structure", + "members":{ + "addon":{"shape":"Addon"} + } + }, "DeleteClusterRequest":{ "type":"structure", "required":["name"], @@ -794,6 +1147,81 @@ } } }, + "DescribeAddonRequest":{ + "type":"structure", + "required":[ + "clusterName", + "addonName" + ], + "members":{ + "clusterName":{ + "shape":"ClusterName", + "documentation":"

The name of the cluster.

", + "location":"uri", + "locationName":"name" + }, + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on. The name must match one of the names returned by ListAddons.

", + "location":"uri", + "locationName":"addonName" + } + } + }, + "DescribeAddonResponse":{ + "type":"structure", + "members":{ + "addon":{"shape":"Addon"} + } + }, + "DescribeAddonVersionsRequest":{ + "type":"structure", + "members":{ + "kubernetesVersion":{ + "shape":"String", + "documentation":"

The Kubernetes versions that the add-on can be used with.

", + "location":"querystring", + "locationName":"kubernetesVersion" + }, + "maxResults":{ + "shape":"DescribeAddonVersionsRequestMaxResults", + "documentation":"

The maximum number of results to return.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated DescribeAddonVersionsRequest where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.

", + "location":"querystring", + "locationName":"nextToken" + }, + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on. The name must match one of the names returned by ListAddons.

", + "location":"querystring", + "locationName":"addonName" + } + } + }, + "DescribeAddonVersionsRequestMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "DescribeAddonVersionsResponse":{ + "type":"structure", + "members":{ + "addons":{ + "shape":"Addons", + "documentation":"

The list of available versions with Kubernetes version compatibility.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated DescribeAddonVersionsResponse where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.

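DescribeAddonVersions is paginated through maxResults and nextToken exactly as documented above. A minimal sketch of walking every page manually follows; the Kubernetes version and add-on name filters are placeholder values, and the loop treats nextToken as the opaque cursor the documentation describes.

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.AddonInfo;
import software.amazon.awssdk.services.eks.model.DescribeAddonVersionsRequest;
import software.amazon.awssdk.services.eks.model.DescribeAddonVersionsResponse;

public class DescribeAddonVersionsExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            String nextToken = null;
            do {
                DescribeAddonVersionsResponse page = eks.describeAddonVersions(
                        DescribeAddonVersionsRequest.builder()
                                .kubernetesVersion("1.18")   // placeholder cluster version filter
                                .addonName("vpc-cni")        // optional filter, placeholder name
                                .maxResults(50)              // 1-100 per the model
                                .nextToken(nextToken)
                                .build());
                for (AddonInfo info : page.addons()) {
                    // Each AddonInfo lists the available versions and their Kubernetes compatibilities.
                    System.out.println(info.addonName() + " -> " + info.addonVersions());
                }
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}
```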
" + } + } + }, "DescribeClusterRequest":{ "type":"structure", "required":["name"], @@ -899,6 +1327,12 @@ "documentation":"

The name of the Amazon EKS node group associated with the update.

", "location":"querystring", "locationName":"nodegroupName" + }, + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on. The name must match one of the names returned by ListAddons.

", + "location":"querystring", + "locationName":"addonName" } } }, @@ -944,7 +1378,9 @@ "NodeCreationFailure", "PodEvictionFailure", "InsufficientFreeAddresses", - "ClusterUnreachable" + "ClusterUnreachable", + "InsufficientNumberOfReplicas", + "ConfigurationConflict" ] }, "ErrorDetail":{ @@ -1075,6 +1511,7 @@ "shape":"String", "documentation":"

The Fargate profile associated with the exception.

" }, + "addonName":{"shape":"String"}, "message":{"shape":"String"} }, "documentation":"

The specified parameter is invalid. Review the available parameters for the API request.

", @@ -1092,6 +1529,7 @@ "shape":"String", "documentation":"

The Amazon EKS managed node group associated with the exception.

" }, + "addonName":{"shape":"String"}, "message":{"shape":"String"} }, "documentation":"

The request is invalid given the state of the cluster. Check the state of the cluster and the associated operations.

", @@ -1103,7 +1541,7 @@ "members":{ "code":{ "shape":"NodegroupIssueCode", - "documentation":"

A brief description of the error.

  • AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.

  • Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster.

  • Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.

  • Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.

  • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.

  • Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into it. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide.

  • IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.

  • IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.

  • AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances.

  • NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient worker node IAM role permissions or lack of outbound internet access for the nodes.

  • InstanceLimitExceeded: Your AWS account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.

  • InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.

  • AccessDenied: Amazon EKS or one or more of your managed nodes is unable to communicate with your cluster API server.

  • InternalFailure: These errors are usually caused by an Amazon EKS server-side issue.

" + "documentation":"

A brief description of the error.

  • AccessDenied: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server.

  • AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances.

  • AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.

  • ClusterUnreachable: Amazon EKS or one or more of your managed nodes is unable to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests.

  • Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.

  • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.

  • Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.

  • Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster.

  • Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into them. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide.

  • IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.

  • IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.

  • InstanceLimitExceeded: Your AWS account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.

  • InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.

  • InternalFailure: These errors are usually caused by an Amazon EKS server-side issue.

  • NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient worker node IAM role permissions or lack of outbound internet access for the nodes.

" }, "message":{ "shape":"String", @@ -1135,7 +1573,7 @@ "members":{ "serviceIpv4Cidr":{ "shape":"String", - "documentation":"

The CIDR block that Kubernetes service IP addresses are assigned from. If you didn't specify a CIDR block, then Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this was specified, then it was specified when the cluster was created and it cannot be changed.

" + "documentation":"

The CIDR block that Kubernetes service IP addresses are assigned from. If you didn't specify a CIDR block when you created the cluster, then Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this was specified, then it was specified when the cluster was created and it cannot be changed.

" } }, "documentation":"

The Kubernetes network configuration for the cluster.

" @@ -1158,6 +1596,49 @@ }, "documentation":"

An object representing a node group launch template specification. The launch template cannot include SubnetId , IamInstanceProfile , RequestSpotInstances , HibernationOptions , or TerminateInstances , or the node group deployment or update will fail. For more information about launch templates, see CreateLaunchTemplate in the Amazon EC2 API Reference. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

Specify either name or id, but not both.

" }, + "ListAddonsRequest":{ + "type":"structure", + "required":["clusterName"], + "members":{ + "clusterName":{ + "shape":"ClusterName", + "documentation":"

The name of the cluster.

", + "location":"uri", + "locationName":"name" + }, + "maxResults":{ + "shape":"ListAddonsRequestMaxResults", + "documentation":"

The maximum number of add-on results returned by ListAddonsRequest in paginated output. When you use this parameter, ListAddonsRequest returns only maxResults results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListAddonsRequest request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListAddonsRequest returns up to 100 results and a nextToken value, if applicable.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListAddonsRequest where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListAddonsRequestMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListAddonsResponse":{ + "type":"structure", + "members":{ + "addons":{ + "shape":"StringList", + "documentation":"

A list of available add-ons.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The nextToken value returned from a previous paginated ListAddonsResponse where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.

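ListAddons returns only the names of the add-ons installed on a cluster; DescribeAddon (defined earlier in this hunk) fills in the status and health details for each one. Below is a minimal sketch combining the two; "my-cluster" is a placeholder, and a production caller would also follow nextToken as described above when more than one page is returned.

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.DescribeAddonRequest;
import software.amazon.awssdk.services.eks.model.ListAddonsRequest;
import software.amazon.awssdk.services.eks.model.ListAddonsResponse;

public class ListAddonsExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            ListAddonsResponse installed = eks.listAddons(ListAddonsRequest.builder()
                    .clusterName("my-cluster")   // placeholder cluster name
                    .maxResults(100)
                    .build());
            for (String addonName : installed.addons()) {
                // DescribeAddon returns the full Addon object, including its status and health issues.
                System.out.println(addonName + ": " + eks.describeAddon(DescribeAddonRequest.builder()
                        .clusterName("my-cluster")
                        .addonName(addonName)
                        .build()).addon().status());
            }
        }
    }
}
```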
" + } + } + }, "ListClustersRequest":{ "type":"structure", "members":{ @@ -1311,6 +1792,12 @@ "location":"querystring", "locationName":"nodegroupName" }, + "addonName":{ + "shape":"String", + "documentation":"

The names of the installed add-ons that have available updates.

", + "location":"querystring", + "locationName":"addonName" + }, "nextToken":{ "shape":"String", "documentation":"

The nextToken value returned from a previous paginated ListUpdates request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", @@ -1407,7 +1894,7 @@ }, "releaseVersion":{ "shape":"String", - "documentation":"

If the node group was deployed using a launch template with a custom AMI, then this is the AMI ID that was specified in the launch template. For node groups that weren't deployed using a launch template, this is the version of the Amazon EKS-optimized AMI that the node group was deployed with.

" + "documentation":"

If the node group was deployed using a launch template with a custom AMI, then this is the AMI ID that was specified in the launch template. For node groups that weren't deployed using a launch template, this is the version of the Amazon EKS optimized AMI that the node group was deployed with.

" }, "createdAt":{ "shape":"Timestamp", @@ -1421,6 +1908,10 @@ "shape":"NodegroupStatus", "documentation":"

The current status of the managed node group.

" }, + "capacityType":{ + "shape":"CapacityTypes", + "documentation":"

The capacity type of your managed node group.

" + }, "scalingConfig":{ "shape":"NodegroupScalingConfig", "documentation":"

The scaling configuration details for the Auto Scaling group that is associated with your node group.

" @@ -1592,6 +2083,13 @@ }, "documentation":"

An object representing the remote access configuration for the managed node group.

" }, + "ResolveConflicts":{ + "type":"string", + "enum":[ + "OVERWRITE", + "NONE" + ] + }, "ResourceInUseException":{ "type":"structure", "members":{ @@ -1603,6 +2101,7 @@ "shape":"String", "documentation":"

The Amazon EKS managed node group associated with the exception.

" }, + "addonName":{"shape":"String"}, "message":{"shape":"String"} }, "documentation":"

The specified resource is in use.

", @@ -1641,12 +2140,18 @@ "shape":"String", "documentation":"

The Fargate profile associated with the exception.

" }, + "addonName":{"shape":"String"}, "message":{"shape":"String"} }, "documentation":"

The specified resource could not be found. You can view your available clusters with ListClusters. You can view your available managed node groups with ListNodegroups. Amazon EKS clusters and node groups are Region-specific.

", "error":{"httpStatusCode":404}, "exception":true }, + "RoleArn":{ + "type":"string", + "max":255, + "min":1 + }, "ServerException":{ "type":"structure", "members":{ @@ -1658,6 +2163,7 @@ "shape":"String", "documentation":"

The Amazon EKS managed node group associated with the exception.

" }, + "addonName":{"shape":"String"}, "message":{"shape":"String"} }, "documentation":"

These errors are usually caused by a server-side issue.

", @@ -1804,6 +2310,50 @@ }, "documentation":"

An object representing an asynchronous update.

" }, + "UpdateAddonRequest":{ + "type":"structure", + "required":[ + "clusterName", + "addonName" + ], + "members":{ + "clusterName":{ + "shape":"ClusterName", + "documentation":"

The name of the cluster.

", + "location":"uri", + "locationName":"name" + }, + "addonName":{ + "shape":"String", + "documentation":"

The name of the add-on. The name must match one of the names returned by ListAddons.

", + "location":"uri", + "locationName":"addonName" + }, + "addonVersion":{ + "shape":"String", + "documentation":"

The version of the add-on. The version must match one of the versions returned by DescribeAddonVersions.

" + }, + "serviceAccountRoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide.

To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see Enabling IAM roles for service accounts on your cluster in the Amazon EKS User Guide.

" + }, + "resolveConflicts":{ + "shape":"ResolveConflicts", + "documentation":"

How to resolve parameter value conflicts when applying the new version of the add-on to the cluster.

" + }, + "clientRequestToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

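UpdateAddon mirrors CreateAddon but returns an Update object (which can be tracked through DescribeUpdate, whose request gains an addonName parameter in this same diff) rather than the Addon itself. A minimal sketch of bumping an add-on version follows; the names and version string are placeholders, and ResolveConflicts.NONE surfaces configuration conflicts instead of overwriting them.

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.ResolveConflicts;
import software.amazon.awssdk.services.eks.model.UpdateAddonRequest;
import software.amazon.awssdk.services.eks.model.UpdateAddonResponse;

public class UpdateAddonExample {
    public static void main(String[] args) {
        try (EksClient eks = EksClient.create()) {
            UpdateAddonResponse response = eks.updateAddon(UpdateAddonRequest.builder()
                    .clusterName("my-cluster")            // placeholder cluster name
                    .addonName("vpc-cni")                 // placeholder add-on name
                    .addonVersion("v1.7.6-eksbuild.1")    // placeholder target version
                    .resolveConflicts(ResolveConflicts.NONE)
                    .build());
            // The asynchronous update can be polled with DescribeUpdate using this id.
            System.out.println("Update id: " + response.update().id());
        }
    }
}
```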
", + "idempotencyToken":true + } + } + }, + "UpdateAddonResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, "UpdateClusterConfigRequest":{ "type":"structure", "required":["name"], @@ -1944,7 +2494,7 @@ }, "releaseVersion":{ "shape":"String", - "documentation":"

The AMI version of the Amazon EKS-optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "documentation":"

The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", @@ -1995,7 +2545,10 @@ "MaxSize", "MinSize", "ReleaseVersion", - "PublicAccessCidrs" + "PublicAccessCidrs", + "AddonVersion", + "ServiceAccountRoleArn", + "ResolveConflicts" ] }, "UpdateParams":{ @@ -2017,7 +2570,8 @@ "VersionUpdate", "EndpointAccessUpdate", "LoggingUpdate", - "ConfigUpdate" + "ConfigUpdate", + "AddonUpdate" ] }, "VpcConfigRequest":{ @@ -2029,7 +2583,7 @@ }, "securityGroupIds":{ "shape":"StringList", - "documentation":"

Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you don't specify a security group, the default security group for your VPC is used.

" + "documentation":"

Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you don't specify any security groups, then familiarize yourself with the difference between Amazon EKS defaults for clusters deployed with Kubernetes:

  • 1.14 Amazon EKS platform version eks.2 and earlier

  • 1.14 Amazon EKS platform version eks.3 and later

For more information, see Amazon EKS security group considerations in the Amazon EKS User Guide .

" }, "endpointPublicAccess":{ "shape":"BoxedBoolean", diff --git a/services/eks/src/main/resources/codegen-resources/waiters-2.json b/services/eks/src/main/resources/codegen-resources/waiters-2.json index 449d2296c391..6383d079325f 100644 --- a/services/eks/src/main/resources/codegen-resources/waiters-2.json +++ b/services/eks/src/main/resources/codegen-resources/waiters-2.json @@ -86,6 +86,43 @@ "state": "success" } ] + }, + "AddonActive": { + "delay": 10, + "operation": "DescribeAddon", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "CREATE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "addon.status" + }, + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "addon.status" + } + ] + }, + "AddonDeleted": { + "delay": 10, + "operation": "DescribeAddon", + "maxAttempts": 60, + "acceptors": [ + { + "expected": "DELETE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "addon.status" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + } + ] } } } diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index 7d6554cb8a6d..44e1db9489f7 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticache/src/main/resources/codegen-resources/service-2.json b/services/elasticache/src/main/resources/codegen-resources/service-2.json index cc9118328ab5..1cf7fd5bf33a 100644 --- a/services/elasticache/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticache/src/main/resources/codegen-resources/service-2.json @@ -307,7 +307,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

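The AddonActive waiter added to waiters-2.json above polls DescribeAddon every 10 seconds for up to 60 attempts, succeeding when addon.status reaches ACTIVE and failing on CREATE_FAILED. Below is a minimal hand-rolled sketch of the same acceptor logic, useful where the generated waiter isn't available; the cluster and add-on names are placeholders, and the SDK's generated waiter (typically reachable via eks.waiter()) is the preferred path when present.

```java
import software.amazon.awssdk.services.eks.EksClient;
import software.amazon.awssdk.services.eks.model.AddonStatus;
import software.amazon.awssdk.services.eks.model.DescribeAddonRequest;

public class WaitForAddonActive {
    public static void main(String[] args) throws InterruptedException {
        try (EksClient eks = EksClient.create()) {
            DescribeAddonRequest describe = DescribeAddonRequest.builder()
                    .clusterName("my-cluster")   // placeholder names
                    .addonName("vpc-cni")
                    .build();
            for (int attempt = 1; attempt <= 60; attempt++) {             // maxAttempts: 60
                AddonStatus status = eks.describeAddon(describe).addon().status();
                if (status == AddonStatus.ACTIVE) {                        // waiter success state
                    System.out.println("Add-on is active");
                    return;
                }
                if (status == AddonStatus.CREATE_FAILED) {                 // waiter failure state
                    throw new IllegalStateException("Add-on creation failed");
                }
                Thread.sleep(10_000L);                                     // delay: 10 seconds
            }
            throw new IllegalStateException("Timed out waiting for add-on to become active");
        }
    }
}
```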
For Redis engine version 6.04 onwards: Creates a Redis user. For more information, see Using Role Based Access Control (RBAC).

" + "documentation":"

For Redis engine version 6.x onwards: Creates a Redis user. For more information, see Using Role Based Access Control (RBAC).

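The RBAC operations introduced here map to the usual builder pattern on the ElastiCache client. A minimal sketch of creating a Redis 6.x user follows; the user id, user name, access string, and password are placeholder values, and the builder methods assume the standard codegen mapping of the CreateUser members (UserId, UserName, Engine, AccessString, Passwords).

```java
import software.amazon.awssdk.services.elasticache.ElastiCacheClient;
import software.amazon.awssdk.services.elasticache.model.CreateUserRequest;

public class CreateRedisUserExample {
    public static void main(String[] args) {
        try (ElastiCacheClient elastiCache = ElastiCacheClient.create()) {
            elastiCache.createUser(CreateUserRequest.builder()
                    .userId("app-user-1")                        // placeholder user id
                    .userName("app-user")                        // placeholder user name
                    .engine("redis")                             // RBAC users are for Redis 6.x onward
                    .accessString("on ~app:* +@read +@write")    // Redis ACL rules for this user
                    .passwords("replace-with-a-long-password")   // or noPasswordRequired(true)
                    .build());
        }
    }
}
```

A user group would then be created with CreateUserGroup, covered by the operation that follows, and associated with a replication group.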
" }, "CreateUserGroup":{ "name":"CreateUserGroup", @@ -328,7 +328,7 @@ {"shape":"UserGroupQuotaExceededFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

For Redis engine version 6.04 onwards: Creates a Redis user group. For more information, see Using Role Based Access Control (RBAC)

" + "documentation":"

For Redis engine version 6.x onwards: Creates a Redis user group. For more information, see Using Role Based Access Control (RBAC)

" }, "DecreaseNodeGroupsInGlobalReplicationGroup":{ "name":"DecreaseNodeGroupsInGlobalReplicationGroup", @@ -517,7 +517,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"DefaultUserAssociatedToUserGroupFault"} ], - "documentation":"

For Redis engine version 6.04 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC).

" + "documentation":"

For Redis engine version 6.x onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC).

" }, "DeleteUserGroup":{ "name":"DeleteUserGroup", @@ -535,7 +535,7 @@ {"shape":"InvalidUserGroupStateFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

For Redis engine version 6.04 onwards: Deletes a ser group. The user group must first be disassociated from the replcation group before it can be deleted. For more information, see Using Role Based Access Control (RBAC).

" + "documentation":"

For Redis engine version 6.x onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC).

" }, "DescribeCacheClusters":{ "name":"DescribeCacheClusters", @@ -1288,7 +1288,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from master node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from master node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" + "documentation":"

Represents the input of a TestFailover operation, which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete, you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see Testing Multi-AZ in the ElastiCache User Guide.

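A minimal sketch of invoking TestFailover for one shard of a replication group, per the description above, follows; the replication group id and node group (shard) id are placeholders, and the resulting failover events would then be observed through the console, the AWS CLI, or the DescribeEvents API as the documentation lists.

```java
import software.amazon.awssdk.services.elasticache.ElastiCacheClient;
import software.amazon.awssdk.services.elasticache.model.TestFailoverRequest;

public class TestFailoverExample {
    public static void main(String[] args) {
        try (ElastiCacheClient elastiCache = ElastiCacheClient.create()) {
            // Triggers an automatic failover test on one shard (node group) of the replication group.
            elastiCache.testFailover(TestFailoverRequest.builder()
                    .replicationGroupId("my-replication-group")  // placeholder replication group (cluster) id
                    .nodeGroupId("0001")                         // placeholder node group (shard) id
                    .build());
        }
    }
}
```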
" } }, "shapes":{ @@ -1525,7 +1525,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "Engine":{ "shape":"String", @@ -1687,7 +1687,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

" + "documentation":"

The name of the cache parameter group family associated with this cache engine.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x |

" }, "CacheEngineDescription":{ "shape":"String", @@ -1757,7 +1757,7 @@ "documentation":"

The customer outpost ARN of the cache node.

" } }, - "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "CacheNodeIdsList":{ "type":"list", @@ -1897,7 +1897,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

" + "documentation":"

The name of the cache parameter group family that this cache parameter group is compatible with.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x |

" }, "Description":{ "shape":"String", @@ -2327,7 +2327,7 @@ }, "TargetBucket":{ "shape":"String", - "documentation":"

The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.

When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.

" + "documentation":"

The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.

When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.

For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.

" }, "KmsKeyId":{ "shape":"String", @@ -2372,7 +2372,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "Engine":{ "shape":"String", @@ -2473,7 +2473,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

" + "documentation":"

The name of the cache parameter group family that the cache parameter group can be used with.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x |

" }, "Description":{ "shape":"String", @@ -2621,7 +2621,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "Engine":{ "shape":"String", @@ -3088,7 +3088,7 @@ }, "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

Constraints:

  • Must be 1 to 255 alphanumeric characters

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

" + "documentation":"

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x |

Constraints:

  • Must be 1 to 255 alphanumeric characters

  • First character must be a letter

  • Cannot end with a hyphen or contain two consecutive hyphens

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -3188,7 +3188,7 @@ "members":{ "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

The name of the cache parameter group family.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

" + "documentation":"

The name of the cache parameter group family.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x |

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -3306,7 +3306,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "Duration":{ "shape":"String", @@ -3318,7 +3318,7 @@ }, "OfferingType":{ "shape":"String", - "documentation":"

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

" + "documentation":"

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"|\"All Upfront\"|\"Partial Upfront\"| \"No Upfront\"

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -3340,7 +3340,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "Duration":{ "shape":"String", @@ -3352,7 +3352,7 @@ }, "OfferingType":{ "shape":"String", - "documentation":"

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid Values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\"

" + "documentation":"

The offering type filter value. Use this parameter to show only the available offerings matching the specified offering type.

Valid Values: \"Light Utilization\"|\"Medium Utilization\"|\"Heavy Utilization\" |\"All Upfront\"|\"Partial Upfront\"| \"No Upfront\"

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -3632,7 +3632,7 @@ "members":{ "CacheParameterGroupFamily":{ "shape":"String", - "documentation":"

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |

" + "documentation":"

Specifies the name of the cache parameter group family to which the engine default parameters apply.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x |

" }, "Marker":{ "shape":"String", @@ -3813,7 +3813,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The ElastiCache Redis engine version. For preview, it is Redis version 5.0.5 only.

" + "documentation":"

The ElastiCache Redis engine version.

" }, "Members":{ "shape":"GlobalReplicationGroupMemberList", @@ -5457,7 +5457,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "StartTime":{ "shape":"TStamp", @@ -5569,7 +5569,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "Duration":{ "shape":"Integer", @@ -5907,7 +5907,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" + "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      At this time, M6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).

      cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      At this time, R6g node types are available in the following regions: us-east-1, us-west-2, us-east-2, eu-central-1, eu-west-1 and ap-northeast-1.

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.

" }, "Engine":{ "shape":"String", diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index ace224070889..14cc64324b6b 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json b/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json index 249e5a1cc288..f2b2b94f7474 100755 --- a/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json @@ -1947,7 +1947,7 @@ "documentation":"

The pagination token returned by a previous request.

" }, "MaxItems":{ - "shape":"Integer", + "shape":"ManagedActionHistoryMaxItems", "documentation":"

The maximum number of items to return for a single request.

" } }, @@ -2462,8 +2462,11 @@ "EnvironmentStatus":{ "type":"string", "enum":[ + "Aborting", "Launching", "Updating", + "LinkingFrom", + "LinkingTo", "Ready", "Terminating", "Terminated" @@ -2970,6 +2973,11 @@ "max":100, "min":1 }, + "ManagedActionHistoryMaxItems":{ + "type":"integer", + "max":100, + "min":1 + }, "ManagedActionInvalidStateException":{ "type":"structure", "members":{ diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index 856fd80a9fda..3f56a7fcd5d7 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index a72d291222a8..af3b1a66683b 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index 9ea2a9f49aee..e36370c1ab2b 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json index c3002376470e..2ea2fdefef37 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"TooManyCertificatesException"}, {"shape":"CertificateNotFoundException"} ], - "documentation":"

Adds the specified SSL server certificate to the certificate list for the specified HTTPS or TLS listener.

If the certificate is already in the certificate list, the call is successful but the certificate is not added again.

To get the certificate list for a listener, use DescribeListenerCertificates. To remove certificates from the certificate list for a listener, use RemoveListenerCertificates. To replace the default certificate for a listener, use ModifyListener.

For more information, see SSL Certificates in the Application Load Balancers Guide.

" + "documentation":"

Adds the specified SSL server certificate to the certificate list for the specified HTTPS or TLS listener.

If the certificate is already in the certificate list, the call is successful but the certificate is not added again.

For more information, see HTTPS listeners in the Application Load Balancers Guide or TLS listeners in the Network Load Balancers Guide.

" }, "AddTags":{ "name":"AddTags", @@ -47,7 +47,7 @@ {"shape":"LoadBalancerNotFoundException"}, {"shape":"TargetGroupNotFoundException"} ], - "documentation":"

Adds the specified tags to the specified Elastic Load Balancing resource. You can tag your Application Load Balancers, Network Load Balancers, target groups, listeners, and rules.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, AddTags updates its value.

To list the current tags for your resources, use DescribeTags. To remove tags from your resources, use RemoveTags.

" + "documentation":"

Adds the specified tags to the specified Elastic Load Balancing resource. You can tag your Application Load Balancers, Network Load Balancers, Gateway Load Balancers, target groups, listeners, and rules.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, AddTags updates its value.

" }, "CreateListener":{ "name":"CreateListener", @@ -80,7 +80,7 @@ {"shape":"ALPNPolicyNotSupportedException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a listener for the specified Application Load Balancer or Network Load Balancer.

To update a listener, use ModifyListener. When you are finished with a listener, you can delete it using DeleteListener. If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Creates a listener for the specified Application Load Balancer, Network Load Balancer, or Gateway Load Balancer.

For more information, see the following:

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

" }, "CreateLoadBalancer":{ "name":"CreateLoadBalancer", @@ -108,7 +108,7 @@ {"shape":"AvailabilityZoneNotSupportedException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Creates an Application Load Balancer or a Network Load Balancer.

When you create a load balancer, you can specify security groups, public subnets, IP address type, and tags. Otherwise, you could do so later using SetSecurityGroups, SetSubnets, SetIpAddressType, and AddTags.

To create listeners for your load balancer, use CreateListener. To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

For limit information, see Limits for Your Application Load Balancer in the Application Load Balancers Guide and Limits for Your Network Load Balancer in the Network Load Balancers Guide.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple load balancers with the same settings, each call succeeds.

For more information, see Application Load Balancers in the Application Load Balancers Guide and Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Creates an Application Load Balancer, Network Load Balancer, or Gateway Load Balancer.

For more information, see the following:

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple load balancers with the same settings, each call succeeds.

" }, "CreateRule":{ "name":"CreateRule", @@ -138,7 +138,7 @@ {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.

Each rule consists of a priority, one or more actions, and one or more conditions. Rules are evaluated in priority order, from the lowest value to the highest value. When the conditions for a rule are met, its actions are performed. If the conditions for no rules are met, the actions for the default rule are performed. For more information, see Listener Rules in the Application Load Balancers Guide.

To view your current rules, use DescribeRules. To update a rule, use ModifyRule. To set the priorities of your rules, use SetRulePriorities. To delete a rule, use DeleteRule.

" + "documentation":"

Creates a rule for the specified listener. The listener must be associated with an Application Load Balancer.

Each rule consists of a priority, one or more actions, and one or more conditions. Rules are evaluated in priority order, from the lowest value to the highest value. When the conditions for a rule are met, its actions are performed. If no rule's conditions are met, the actions for the default rule are performed. For more information, see Listener rules in the Application Load Balancers Guide.

" }, "CreateTargetGroup":{ "name":"CreateTargetGroup", @@ -157,7 +157,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a target group.

To register targets with the target group, use RegisterTargets. To update the health check settings for the target group, use ModifyTargetGroup. To monitor the health of targets in the target group, use DescribeTargetHealth.

To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule.

To delete a target group, use DeleteTargetGroup.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple target groups with the same settings, each call succeeds.

For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide or Target Groups for Your Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Creates a target group.

For more information, see the following:

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple target groups with the same settings, each call succeeds.

" }, "DeleteListener":{ "name":"DeleteListener", @@ -173,7 +173,7 @@ "errors":[ {"shape":"ListenerNotFoundException"} ], - "documentation":"

Deletes the specified listener.

Alternatively, your listener is deleted when you delete the load balancer to which it is attached, using DeleteLoadBalancer.

" + "documentation":"

Deletes the specified listener.

Alternatively, your listener is deleted when you delete the load balancer to which it is attached.

" }, "DeleteLoadBalancer":{ "name":"DeleteLoadBalancer", @@ -191,7 +191,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Deletes the specified Application Load Balancer or Network Load Balancer and its attached listeners.

You can't delete a load balancer if deletion protection is enabled. If the load balancer does not exist or has already been deleted, the call succeeds.

Deleting a load balancer does not affect its registered targets. For example, your EC2 instances continue to run and are still registered to their target groups. If you no longer need these EC2 instances, you can stop or terminate them.

" + "documentation":"

Deletes the specified Application Load Balancer, Network Load Balancer, or Gateway Load Balancer. Deleting a load balancer also deletes its listeners.

You can't delete a load balancer if deletion protection is enabled. If the load balancer does not exist or has already been deleted, the call succeeds.

Deleting a load balancer does not affect its registered targets. For example, your EC2 instances continue to run and are still registered to their target groups. If you no longer need these EC2 instances, you can stop or terminate them.

" }, "DeleteRule":{ "name":"DeleteRule", @@ -224,7 +224,7 @@ "errors":[ {"shape":"ResourceInUseException"} ], - "documentation":"

Deletes the specified target group.

You can delete a target group if it is not referenced by any actions. Deleting a target group also deletes any associated health checks.

" + "documentation":"

Deletes the specified target group.

You can delete a target group if it is not referenced by any actions. Deleting a target group also deletes any associated health checks. Deleting a target group does not affect its registered targets. For example, any EC2 instances continue to run until you stop or terminate them.

" }, "DeregisterTargets":{ "name":"DeregisterTargets", @@ -254,7 +254,7 @@ "shape":"DescribeAccountLimitsOutput", "resultWrapper":"DescribeAccountLimitsResult" }, - "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see Limits for Your Application Load Balancers in the Application Load Balancer Guide or Limits for Your Network Load Balancers in the Network Load Balancers Guide.

" + "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see the following:

" }, "DescribeListenerCertificates":{ "name":"DescribeListenerCertificates", @@ -270,7 +270,7 @@ "errors":[ {"shape":"ListenerNotFoundException"} ], - "documentation":"

Describes the default certificate and the certificate list for the specified HTTPS or TLS listener.

If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

For more information, see SSL Certificates in the Application Load Balancers Guide.

" + "documentation":"

Describes the default certificate and the certificate list for the specified HTTPS or TLS listener.

If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

For more information, see SSL certificates in the Application Load Balancers Guide or Server certificates in the Network Load Balancers Guide.

" }, "DescribeListeners":{ "name":"DescribeListeners", @@ -288,7 +288,7 @@ {"shape":"LoadBalancerNotFoundException"}, {"shape":"UnsupportedProtocolException"} ], - "documentation":"

Describes the specified listeners or the listeners for the specified Application Load Balancer or Network Load Balancer. You must specify either a load balancer or one or more listeners.

For an HTTPS or TLS listener, the output includes the default certificate for the listener. To describe the certificate list for the listener, use DescribeListenerCertificates.

" + "documentation":"

Describes the specified listeners or the listeners for the specified Application Load Balancer, Network Load Balancer, or Gateway Load Balancer. You must specify either a load balancer or one or more listeners.

" }, "DescribeLoadBalancerAttributes":{ "name":"DescribeLoadBalancerAttributes", @@ -304,7 +304,7 @@ "errors":[ {"shape":"LoadBalancerNotFoundException"} ], - "documentation":"

Describes the attributes for the specified Application Load Balancer or Network Load Balancer.

For more information, see Load Balancer Attributes in the Application Load Balancers Guide or Load Balancer Attributes in the Network Load Balancers Guide.

" + "documentation":"

Describes the attributes for the specified Application Load Balancer, Network Load Balancer, or Gateway Load Balancer.

For more information, see the following:

" }, "DescribeLoadBalancers":{ "name":"DescribeLoadBalancers", @@ -320,7 +320,7 @@ "errors":[ {"shape":"LoadBalancerNotFoundException"} ], - "documentation":"

Describes the specified load balancers or all of your load balancers.

To describe the listeners for a load balancer, use DescribeListeners. To describe the attributes for a load balancer, use DescribeLoadBalancerAttributes.

" + "documentation":"

Describes the specified load balancers or all of your load balancers.

" }, "DescribeRules":{ "name":"DescribeRules", @@ -354,7 +354,7 @@ "errors":[ {"shape":"SSLPolicyNotFoundException"} ], - "documentation":"

Describes the specified policies or all policies used for SSL negotiation.

For more information, see Security Policies in the Application Load Balancers Guide.

" + "documentation":"

Describes the specified policies or all policies used for SSL negotiation.

For more information, see Security policies in the Application Load Balancers Guide or Security policies in the Network Load Balancers Guide.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -373,7 +373,7 @@ {"shape":"ListenerNotFoundException"}, {"shape":"RuleNotFoundException"} ], - "documentation":"

Describes the tags for the specified Elastic Load Balancing resources. You can describe the tags for one or more Application Load Balancers, Network Load Balancers, target groups, listeners, or rules.

" + "documentation":"

Describes the tags for the specified Elastic Load Balancing resources. You can describe the tags for one or more Application Load Balancers, Network Load Balancers, Gateway Load Balancers, target groups, listeners, or rules.

" }, "DescribeTargetGroupAttributes":{ "name":"DescribeTargetGroupAttributes", @@ -389,7 +389,7 @@ "errors":[ {"shape":"TargetGroupNotFoundException"} ], - "documentation":"

Describes the attributes for the specified target group.

For more information, see Target Group Attributes in the Application Load Balancers Guide or Target Group Attributes in the Network Load Balancers Guide.

" + "documentation":"

Describes the attributes for the specified target group.

For more information, see the following:

" }, "DescribeTargetGroups":{ "name":"DescribeTargetGroups", @@ -406,7 +406,7 @@ {"shape":"LoadBalancerNotFoundException"}, {"shape":"TargetGroupNotFoundException"} ], - "documentation":"

Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups.

To describe the targets for a target group, use DescribeTargetHealth. To describe the attributes of a target group, use DescribeTargetGroupAttributes.

" + "documentation":"

Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups.

" }, "DescribeTargetHealth":{ "name":"DescribeTargetHealth", @@ -473,7 +473,7 @@ {"shape":"LoadBalancerNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Modifies the specified attributes of the specified Application Load Balancer or Network Load Balancer.

If any of the specified attributes can't be modified as requested, the call fails. Any existing attributes that you do not modify retain their current values.

" + "documentation":"

Modifies the specified attributes of the specified Application Load Balancer, Network Load Balancer, or Gateway Load Balancer.

If any of the specified attributes can't be modified as requested, the call fails. Any existing attributes that you do not modify retain their current values.

" }, "ModifyRule":{ "name":"ModifyRule", @@ -499,7 +499,7 @@ {"shape":"InvalidLoadBalancerActionException"}, {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ], - "documentation":"

Replaces the specified properties of the specified rule. Any properties that you do not specify are unchanged.

To add an item to a list, remove an item from a list, or update an item in a list, you must provide the entire list. For example, to add an action, specify a list with the current actions plus the new action.

To modify the actions for the default rule, use ModifyListener.

" + "documentation":"

Replaces the specified properties of the specified rule. Any properties that you do not specify are unchanged.

To add an item to a list, remove an item from a list, or update an item in a list, you must provide the entire list. For example, to add an action, specify a list with the current actions plus the new action.

" }, "ModifyTargetGroup":{ "name":"ModifyTargetGroup", @@ -516,7 +516,7 @@ {"shape":"TargetGroupNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Modifies the health checks used when evaluating the health state of the targets in the specified target group.

To monitor the health of the targets, use DescribeTargetHealth.

" + "documentation":"

Modifies the health checks used when evaluating the health state of the targets in the specified target group.

" }, "ModifyTargetGroupAttributes":{ "name":"ModifyTargetGroupAttributes", @@ -552,7 +552,7 @@ {"shape":"InvalidTargetException"}, {"shape":"TooManyRegistrationsForTargetIdException"} ], - "documentation":"

Registers the specified targets with the specified target group.

If the target is an EC2 instance, it must be in the running state when you register it.

By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.

With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.

To remove a target from a target group, use DeregisterTargets.

" + "documentation":"

Registers the specified targets with the specified target group.

If the target is an EC2 instance, it must be in the running state when you register it.

By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.

With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.

" }, "RemoveListenerCertificates":{ "name":"RemoveListenerCertificates", @@ -569,7 +569,7 @@ {"shape":"ListenerNotFoundException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Removes the specified certificate from the certificate list for the specified HTTPS or TLS listener.

You can't remove the default certificate for a listener. To replace the default certificate, call ModifyListener.

To list the certificates for your listener, use DescribeListenerCertificates.

" + "documentation":"

Removes the specified certificate from the certificate list for the specified HTTPS or TLS listener.

" }, "RemoveTags":{ "name":"RemoveTags", @@ -589,7 +589,7 @@ {"shape":"RuleNotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Removes the specified tags from the specified Elastic Load Balancing resources. You can remove the tags for one or more Application Load Balancers, Network Load Balancers, target groups, listeners, or rules.

To list the current tags for your resources, use DescribeTags.

" + "documentation":"

Removes the specified tags from the specified Elastic Load Balancing resources. You can remove the tags for one or more Application Load Balancers, Network Load Balancers, Gateway Load Balancers, target groups, listeners, or rules.

" }, "SetIpAddressType":{ "name":"SetIpAddressType", @@ -643,7 +643,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"InvalidSecurityGroupException"} ], - "documentation":"

Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups.

You can't specify a security group for a Network Load Balancer.

" + "documentation":"

Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups.

You can't specify a security group for a Network Load Balancer or Gateway Load Balancer.

" }, "SetSubnets":{ "name":"SetSubnets", @@ -664,7 +664,7 @@ {"shape":"AllocationIdNotFoundException"}, {"shape":"AvailabilityZoneNotSupportedException"} ], - "documentation":"

Enables the Availability Zones for the specified public subnets for the specified load balancer. The specified subnets replace the previously enabled subnets.

When you specify subnets for a Network Load Balancer, you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets.

" + "documentation":"

Enables the Availability Zones for the specified public subnets for the specified Application Load Balancer or Network Load Balancer. The specified subnets replace the previously enabled subnets.

When you specify subnets for a Network Load Balancer, you must include all subnets that were enabled previously, with their existing configurations, plus any additional subnets.

" } }, "shapes":{ @@ -1052,8 +1052,6 @@ "type":"structure", "required":[ "LoadBalancerArn", - "Protocol", - "Port", "DefaultActions" ], "members":{ @@ -1063,27 +1061,27 @@ }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP.

" + "documentation":"

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can’t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.

" }, "Port":{ "shape":"Port", - "documentation":"

The port on which the load balancer is listening.

" + "documentation":"

The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.

" }, "SslPolicy":{ "shape":"SslPolicyName", - "documentation":"

[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported. The following are the possible values:

  • ELBSecurityPolicy-2016-08

  • ELBSecurityPolicy-TLS-1-0-2015-04

  • ELBSecurityPolicy-TLS-1-1-2017-01

  • ELBSecurityPolicy-TLS-1-2-2017-01

  • ELBSecurityPolicy-TLS-1-2-Ext-2018-06

  • ELBSecurityPolicy-FS-2018-06

  • ELBSecurityPolicy-FS-1-1-2019-08

  • ELBSecurityPolicy-FS-1-2-2019-08

  • ELBSecurityPolicy-FS-1-2-Res-2019-08

For more information, see Security Policies in the Application Load Balancers Guide and Security Policies in the Network Load Balancers Guide.

" + "documentation":"

[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.

For more information, see Security policies in the Application Load Balancers Guide and Security policies in the Network Load Balancers Guide.

" }, "Certificates":{ "shape":"CertificateList", - "documentation":"

[HTTPS and TLS listeners] The default certificate for the listener. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list for the listener, use AddListenerCertificates.

" + "documentation":"

[HTTPS and TLS listeners] The default certificate for the listener. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions for the default rule.

" }, "AlpnPolicy":{ "shape":"AlpnPolicyName", - "documentation":"

[TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

  • HTTP1Only

  • HTTP2Only

  • HTTP2Optional

  • HTTP2Preferred

  • None

For more information, see ALPN Policies in the Network Load Balancers Guide.

" + "documentation":"

[TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

  • HTTP1Only

  • HTTP2Only

  • HTTP2Optional

  • HTTP2Preferred

  • None

For more information, see ALPN policies in the Network Load Balancers Guide.

" }, "Tags":{ "shape":"TagList", @@ -1110,11 +1108,11 @@ }, "Subnets":{ "shape":"Subnets", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

" + "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet.

" + "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.

" }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -1122,7 +1120,7 @@ }, "Scheme":{ "shape":"LoadBalancerSchemeEnum", - "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.

The default is an Internet-facing load balancer.

" + "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.

The default is an Internet-facing load balancer.

You cannot specify a scheme for a Gateway Load Balancer.

" }, "Tags":{ "shape":"TagList", @@ -1134,7 +1132,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

[Application Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4.

" + "documentation":"

The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4.

" }, "CustomerOwnedIpv4Pool":{ "shape":"CustomerOwnedIpv4Pool", @@ -1166,7 +1164,7 @@ }, "Conditions":{ "shape":"RuleConditionList", - "documentation":"

The conditions. Each rule can optionally include up to one of each of the following conditions: http-request-method, host-header, path-pattern, and source-ip. Each rule can also optionally include one or more of each of the following conditions: http-header and query-string.

" + "documentation":"

The conditions.

" }, "Priority":{ "shape":"RulePriority", @@ -1174,7 +1172,7 @@ }, "Actions":{ "shape":"Actions", - "documentation":"

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect, and it must be the last action to be performed.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions.

" }, "Tags":{ "shape":"TagList", @@ -1201,11 +1199,15 @@ }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, or TCP_UDP. A TCP_UDP listener must be associated with a TCP_UDP target group. If the target is a Lambda function, this parameter does not apply.

" + "documentation":"

The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, or TCP_UDP. For Gateway Load Balancers, the supported protocol is GENEVE. A TCP_UDP listener must be associated with a TCP_UDP target group. If the target is a Lambda function, this parameter does not apply.

" + }, + "ProtocolVersion":{ + "shape":"ProtocolVersion", + "documentation":"

[HTTP/HTTPS protocol] The protocol version. Specify GRPC to send requests to targets using gRPC. Specify HTTP2 to send requests to targets using HTTP/2. The default is HTTP1, which sends requests to targets using HTTP/1.1.

" }, "Port":{ "shape":"Port", - "documentation":"

The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. If the target is a Lambda function, this parameter does not apply.

" + "documentation":"

The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. If the target is a Lambda function, this parameter does not apply. If the protocol is GENEVE, the supported port is 6081.

" }, "VpcId":{ "shape":"VpcId", @@ -1213,11 +1215,11 @@ }, "HealthCheckProtocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers, the default is TCP. The TCP protocol is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The TLS, UDP, and TCP_UDP protocols are not supported for health checks.

" + "documentation":"

The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers and Gateway Load Balancers, the default is TCP. The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.

" }, "HealthCheckPort":{ "shape":"HealthCheckPort", - "documentation":"

The port the load balancer uses when performing health checks on targets. The default is traffic-port, which is the port on which each target receives traffic from the load balancer.

" + "documentation":"

The port the load balancer uses when performing health checks on targets. If the protocol is HTTP, HTTPS, TCP, TLS, UDP, or TCP_UDP, the default is traffic-port, which is the port on which each target receives traffic from the load balancer. If the protocol is GENEVE, the default is port 80.

" }, "HealthCheckEnabled":{ "shape":"HealthCheckEnabled", @@ -1225,31 +1227,31 @@ }, "HealthCheckPath":{ "shape":"Path", - "documentation":"

[HTTP/HTTPS health checks] The ping path that is the destination on the targets for health checks. The default is /.

" + "documentation":"

[HTTP/HTTPS health checks] The destination for health checks on the targets.

[HTTP1 or HTTP2 protocol version] The ping path. The default is /.

[GRPC protocol version] The path of a custom health check method with the format /package.service/method. The default is /AWS.ALB/healthcheck.

" }, "HealthCheckIntervalSeconds":{ "shape":"HealthCheckIntervalSeconds", - "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For HTTP and HTTPS health checks, the range is 5–300 seconds. For TCP health checks, the supported values are 10 and 30 seconds. If the target type is instance or ip, the default is 30 seconds. If the target type is lambda, the default is 35 seconds.

" + "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For TCP health checks, the supported values are 10 and 30 seconds. If the target type is instance or ip, the default is 30 seconds. If the target group protocol is GENEVE, the default is 10 seconds. If the target type is lambda, the default is 35 seconds.

" }, "HealthCheckTimeoutSeconds":{ "shape":"HealthCheckTimeoutSeconds", - "documentation":"

The amount of time, in seconds, during which no response from a target means a failed health check. For target groups with a protocol of HTTP or HTTPS, the default is 5 seconds. For target groups with a protocol of TCP or TLS, this value must be 6 seconds for HTTP health checks and 10 seconds for TCP and HTTPS health checks. If the target type is lambda, the default is 30 seconds.

" + "documentation":"

The amount of time, in seconds, during which no response from a target means a failed health check. For target groups with a protocol of HTTP, HTTPS, or GENEVE, the default is 5 seconds. For target groups with a protocol of TCP or TLS, this value must be 6 seconds for HTTP health checks and 10 seconds for TCP and HTTPS health checks. If the target type is lambda, the default is 30 seconds.

" }, "HealthyThresholdCount":{ "shape":"HealthCheckThresholdCount", - "documentation":"

The number of consecutive health checks successes required before considering an unhealthy target healthy. For target groups with a protocol of HTTP or HTTPS, the default is 5. For target groups with a protocol of TCP or TLS, the default is 3. If the target type is lambda, the default is 5.

" + "documentation":"

The number of consecutive health check successes required before considering an unhealthy target healthy. For target groups with a protocol of HTTP or HTTPS, the default is 5. For target groups with a protocol of TCP, TLS, or GENEVE, the default is 3. If the target type is lambda, the default is 5.

" }, "UnhealthyThresholdCount":{ "shape":"HealthCheckThresholdCount", - "documentation":"

The number of consecutive health check failures required before considering a target unhealthy. For target groups with a protocol of HTTP or HTTPS, the default is 2. For target groups with a protocol of TCP or TLS, this value must be the same as the healthy threshold count. If the target type is lambda, the default is 2.

" + "documentation":"

The number of consecutive health check failures required before considering a target unhealthy. If the target group protocol is HTTP or HTTPS, the default is 2. If the target group protocol is TCP or TLS, this value must be the same as the healthy threshold count. If the target group protocol is GENEVE, the default is 3. If the target type is lambda, the default is 2.

" }, "Matcher":{ "shape":"Matcher", - "documentation":"

[HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target.

" + "documentation":"

[HTTP/HTTPS health checks] The HTTP or gRPC codes to use when checking for a successful response from a target.

" }, "TargetType":{ "shape":"TargetTypeEnum", - "documentation":"

The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.

  • instance - Targets are specified by instance ID. This is the default value.

  • ip - Targets are specified by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

  • lambda - The target groups contains a single Lambda function.

" + "documentation":"

The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.

  • instance - Register targets by instance ID. This is the default value.

  • ip - Register targets by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

  • lambda - Register a single Lambda function as a target.

" }, "Tags":{ "shape":"TagList", @@ -1759,6 +1761,7 @@ }, "documentation":"

Information about a forward action.

" }, + "GrpcCode":{"type":"string"}, "HealthCheckEnabled":{"type":"boolean"}, "HealthCheckIntervalSeconds":{ "type":"integer", @@ -1823,6 +1826,7 @@ }, "documentation":"

Information about an HTTP method condition.

HTTP defines a set of request methods, also referred to as HTTP verbs. For more information, see the HTTP Method Registry. You can also define custom HTTP methods.

" }, + "IPv6Address":{"type":"string"}, "IncompatibleProtocolsException":{ "type":"structure", "members":{ @@ -1921,7 +1925,7 @@ "members":{ "Name":{ "shape":"Name", - "documentation":"

The name of the limit. The possible values are:

  • application-load-balancers

  • listeners-per-application-load-balancer

  • listeners-per-network-load-balancer

  • network-load-balancers

  • rules-per-application-load-balancer

  • target-groups

  • target-groups-per-action-on-application-load-balancer

  • target-groups-per-action-on-network-load-balancer

  • target-groups-per-application-load-balancer

  • targets-per-application-load-balancer

  • targets-per-availability-zone-per-network-load-balancer

  • targets-per-network-load-balancer

" + "documentation":"

The name of the limit. The possible values are:

  • application-load-balancers

  • condition-values-per-alb-rule

  • condition-wildcards-per-alb-rule

  • gateway-load-balancers

  • gateway-load-balancers-per-vpc

  • geneve-target-groups

  • listeners-per-application-load-balancer

  • listeners-per-network-load-balancer

  • network-load-balancers

  • rules-per-application-load-balancer

  • target-groups

  • target-groups-per-action-on-application-load-balancer

  • target-groups-per-action-on-network-load-balancer

  • target-groups-per-application-load-balancer

  • targets-per-application-load-balancer

  • targets-per-availability-zone-per-gateway-load-balancer

  • targets-per-availability-zone-per-network-load-balancer

  • targets-per-network-load-balancer

" }, "Max":{ "shape":"Max", @@ -2069,6 +2073,10 @@ "PrivateIPv4Address":{ "shape":"PrivateIPv4Address", "documentation":"

[Network Load Balancers] The private IPv4 address for an internal load balancer.

" + }, + "IPv6Address":{ + "shape":"IPv6Address", + "documentation":"

[Network Load Balancers] The IPv6 address.

" } }, "documentation":"

Information about a static IP address for a load balancer.

" @@ -2087,7 +2095,7 @@ "members":{ "Key":{ "shape":"LoadBalancerAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.

  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.

  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.

The following attributes are supported by only Application Load Balancers:

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.

  • routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.

  • routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.

  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.

The following attributes are supported by only Network Load Balancers:

  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true or false. The default is false.

" + "documentation":"

The name of the attribute.

The following attribute is supported by all load balancers:

  • deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false.

  • access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.

  • access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs.

The following attributes are supported by only Application Load Balancers:

  • idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.

  • routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.

  • routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.

  • routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value is true or false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.

  • waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The value is true or false. The default is false.

The following attribute is supported by Network Load Balancers and Gateway Load Balancers:

  • load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The value is true or false. The default is false.

" }, "Value":{ "shape":"LoadBalancerAttributeValue", @@ -2161,7 +2169,8 @@ "type":"string", "enum":[ "application", - "network" + "network", + "gateway" ] }, "LoadBalancers":{ @@ -2171,14 +2180,17 @@ "Marker":{"type":"string"}, "Matcher":{ "type":"structure", - "required":["HttpCode"], "members":{ "HttpCode":{ "shape":"HttpCode", - "documentation":"

The HTTP codes.

For Application Load Balancers, you can specify values between 200 and 499, and the default value is 200. You can specify multiple values (for example, \"200,202\") or a range of values (for example, \"200-299\").

For Network Load Balancers, this is 200–399.

" + "documentation":"

For Application Load Balancers, you can specify values between 200 and 499, and the default value is 200. You can specify multiple values (for example, \"200,202\") or a range of values (for example, \"200-299\").

For Network Load Balancers and Gateway Load Balancers, this must be \"200–399\".

" + }, + "GrpcCode":{ + "shape":"GrpcCode", + "documentation":"

You can specify values between 0 and 99. You can specify multiple values (for example, \"0,1\") or a range of values (for example, \"0-5\"). The default value is 12.

" } }, - "documentation":"

Information to use when checking for a successful response from a target.

" + "documentation":"

The codes to use when checking for a successful response from a target. If the protocol version is gRPC, these are gRPC codes. Otherwise, these are HTTP codes.

" }, "Max":{"type":"string"}, "ModifyListenerInput":{ @@ -2191,27 +2203,27 @@ }, "Port":{ "shape":"Port", - "documentation":"

The port for connections from clients to the load balancer.

" + "documentation":"

The port for connections from clients to the load balancer. You cannot specify a port for a Gateway Load Balancer.

" }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols.

" + "documentation":"

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can’t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.

" }, "SslPolicy":{ "shape":"SslPolicyName", - "documentation":"

[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported. The following are the possible values:

  • ELBSecurityPolicy-2016-08

  • ELBSecurityPolicy-TLS-1-0-2015-04

  • ELBSecurityPolicy-TLS-1-1-2017-01

  • ELBSecurityPolicy-TLS-1-2-2017-01

  • ELBSecurityPolicy-TLS-1-2-Ext-2018-06

  • ELBSecurityPolicy-FS-2018-06

  • ELBSecurityPolicy-FS-1-1-2019-08

  • ELBSecurityPolicy-FS-1-2-2019-08

  • ELBSecurityPolicy-FS-1-2-Res-2019-08

For more information, see Security Policies in the Application Load Balancers Guide and Security Policies in the Network Load Balancers Guide.

" + "documentation":"

[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.

For more information, see Security policies in the Application Load Balancers Guide or Security policies in the Network Load Balancers Guide.

" }, "Certificates":{ "shape":"CertificateList", - "documentation":"

[HTTPS and TLS listeners] The default certificate for the listener. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

To create a certificate list, use AddListenerCertificates.

" + "documentation":"

[HTTPS and TLS listeners] The default certificate for the listener. You must provide exactly one certificate. Set CertificateArn to the certificate ARN but do not set IsDefault.

" }, "DefaultActions":{ "shape":"Actions", - "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions for the default rule.

" }, "AlpnPolicy":{ "shape":"AlpnPolicyName", - "documentation":"

[TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

  • HTTP1Only

  • HTTP2Only

  • HTTP2Optional

  • HTTP2Preferred

  • None

For more information, see ALPN Policies in the Network Load Balancers Guide.

" + "documentation":"

[TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

  • HTTP1Only

  • HTTP2Only

  • HTTP2Optional

  • HTTP2Preferred

  • None

For more information, see ALPN policies in the Network Load Balancers Guide.

" } } }, @@ -2260,11 +2272,11 @@ }, "Conditions":{ "shape":"RuleConditionList", - "documentation":"

The conditions. Each rule can include zero or one of the following conditions: http-request-method, host-header, path-pattern, and source-ip, and zero or more of the following conditions: http-header and query-string.

" + "documentation":"

The conditions.

" }, "Actions":{ "shape":"Actions", - "documentation":"

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect, and it must be the last action to be performed.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + "documentation":"

The actions.

" } } }, @@ -2313,7 +2325,7 @@ }, "HealthCheckProtocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The TLS, UDP, and TCP_UDP protocols are not supported for health checks.

With Network Load Balancers, you can't modify this setting.

" + "documentation":"

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.

With Network Load Balancers, you can't modify this setting.

" }, "HealthCheckPort":{ "shape":"HealthCheckPort", @@ -2321,7 +2333,7 @@ }, "HealthCheckPath":{ "shape":"Path", - "documentation":"

[HTTP/HTTPS health checks] The ping path that is the destination for the health check request.

" + "documentation":"

[HTTP/HTTPS health checks] The destination for health checks on the targets.

[HTTP1 or HTTP2 protocol version] The ping path. The default is /.

[GRPC protocol version] The path of a custom health check method with the format /package.service/method. The default is /AWS.ALB/healthcheck.

" }, "HealthCheckEnabled":{ "shape":"HealthCheckEnabled", @@ -2329,7 +2341,7 @@ }, "HealthCheckIntervalSeconds":{ "shape":"HealthCheckIntervalSeconds", - "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For Application Load Balancers, the range is 5 to 300 seconds. For Network Load Balancers, the supported values are 10 or 30 seconds.

With Network Load Balancers, you can't modify this setting.

" + "documentation":"

The approximate amount of time, in seconds, between health checks of an individual target. For TCP health checks, the supported values are 10 or 30 seconds.

With Network Load Balancers, you can't modify this setting.

" }, "HealthCheckTimeoutSeconds":{ "shape":"HealthCheckTimeoutSeconds", @@ -2341,11 +2353,11 @@ }, "UnhealthyThresholdCount":{ "shape":"HealthCheckThresholdCount", - "documentation":"

The number of consecutive health check failures required before considering the target unhealthy. For Network Load Balancers, this value must be the same as the healthy threshold count.

" + "documentation":"

The number of consecutive health check failures required before considering the target unhealthy. For target groups with a protocol of TCP or TLS, this value must be the same as the healthy threshold count.

" }, "Matcher":{ "shape":"Matcher", - "documentation":"

[HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful response from a target. The possible values are from 200 to 499. You can specify multiple values (for example, \"200,202\") or a range of values (for example, \"200-299\"). The default is 200.

With Network Load Balancers, you can't modify this setting.

" + "documentation":"

[HTTP/HTTPS health checks] The HTTP or gRPC codes to use when checking for a successful response from a target.

With Network Load Balancers, you can't modify this setting.

" } } }, @@ -2418,9 +2430,11 @@ "TCP", "TLS", "UDP", - "TCP_UDP" + "TCP_UDP", + "GENEVE" ] }, + "ProtocolVersion":{"type":"string"}, "QueryStringConditionConfig":{ "type":"structure", "members":{ @@ -2520,7 +2534,7 @@ }, "Targets":{ "shape":"TargetDescriptions", - "documentation":"

The targets.

To register a target by instance ID, specify the instance ID. To register a target by IP address, specify the IP address. To register a Lambda function, specify the ARN of the Lambda function.

" + "documentation":"

The targets.

" } } }, @@ -2732,7 +2746,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4. Network Load Balancers must use ipv4.

" + "documentation":"

The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4. You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

" } } }, @@ -2800,11 +2814,15 @@ }, "Subnets":{ "shape":"Subnets", - "documentation":"

The IDs of the public subnets. You must specify subnets from at least two Availability Zones. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

" + "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

" }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. If you need static IP addresses for your internet-facing load balancer, you can specify one Elastic IP address per subnet. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet.

" + "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.

" + }, + "IpAddressType":{ + "shape":"IpAddressType", + "documentation":"

[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. Internal load balancers must use ipv4.

" } } }, @@ -2814,6 +2832,10 @@ "AvailabilityZones":{ "shape":"AvailabilityZones", "documentation":"

Information about the subnets.

" + }, + "IpAddressType":{ + "shape":"IpAddressType", + "documentation":"

[Network Load Balancers] The IP address type.

" } } }, @@ -2877,6 +2899,10 @@ "PrivateIPv4Address":{ "shape":"PrivateIPv4Address", "documentation":"

[Network Load Balancers] The private IPv4 address for an internal load balancer.

" + }, + "IPv6Address":{ + "shape":"IPv6Address", + "documentation":"

[Network Load Balancers] The IPv6 address.

" } }, "documentation":"

Information about a subnet mapping.

" @@ -2965,7 +2991,7 @@ }, "Port":{ "shape":"Port", - "documentation":"

The port on which the target is listening. Not used if the target is a Lambda function.

" + "documentation":"

The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. Not used if the target is a Lambda function.

" }, "AvailabilityZone":{ "shape":"ZoneName", @@ -3003,7 +3029,7 @@ }, "HealthCheckProtocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol to use to connect with the target.

" + "documentation":"

The protocol to use to connect with the target. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.

" }, "HealthCheckPort":{ "shape":"HealthCheckPort", @@ -3031,11 +3057,11 @@ }, "HealthCheckPath":{ "shape":"Path", - "documentation":"

The destination for the health check request.

" + "documentation":"

The destination for health checks on the targets.

" }, "Matcher":{ "shape":"Matcher", - "documentation":"

The HTTP codes to use when checking for a successful response from a target.

" + "documentation":"

The HTTP or gRPC codes to use when checking for a successful response from a target.

" }, "LoadBalancerArns":{ "shape":"LoadBalancerArns", @@ -3043,7 +3069,11 @@ }, "TargetType":{ "shape":"TargetTypeEnum", - "documentation":"

The type of target that you must specify when registering targets with this target group. The possible values are instance (targets are specified by instance ID) or ip (targets are specified by IP address).

" + "documentation":"

The type of target that you must specify when registering targets with this target group. The possible values are instance (register targets by instance ID), ip (register targets by IP address), or lambda (register a single Lambda function as a target).

" + }, + "ProtocolVersion":{ + "shape":"ProtocolVersion", + "documentation":"

[HTTP/HTTPS protocol] The protocol version. The possible values are GRPC, HTTP1, and HTTP2.

" } }, "documentation":"

Information about a target group.

" @@ -3070,7 +3100,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

  • stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false. The default is false.

  • stickiness.type - The type of sticky sessions. The possible values are lb_cookie for Application Load Balancers or source_ip for Network Load Balancers.

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

  • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin or least_outstanding_requests. The default is round_robin.

  • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).

  • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

  • lambda.multi_value_headers.enabled - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

The following attribute is supported only by Network Load Balancers:

  • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

" + "documentation":"

The name of the attribute.

The following attribute is supported by all load balancers:

  • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false. The default is false.

  • stickiness.type - The type of sticky sessions. The possible values are lb_cookie for Application Load Balancers or source_ip for Network Load Balancers.

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

  • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin or least_outstanding_requests. The default is round_robin.

  • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).

  • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

  • lambda.multi_value_headers.enabled - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

The following attributes are supported only by Network Load Balancers:

  • deregistration_delay.connection_termination.enabled - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true or false. The default is false.

  • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

" }, "Value":{ "shape":"TargetGroupAttributeValue", @@ -3154,7 +3184,7 @@ }, "Reason":{ "shape":"TargetHealthReasonEnum", - "documentation":"

The reason code.

If the target state is healthy, a reason code is not provided.

If the target state is initial, the reason code can be one of the following values:

  • Elb.RegistrationInProgress - The target is in the process of being registered with the load balancer.

  • Elb.InitialHealthChecking - The load balancer is still sending the target the minimum number of health checks required to determine its health status.

If the target state is unhealthy, the reason code can be one of the following values:

  • Target.ResponseCodeMismatch - The health checks did not return an expected HTTP code. Applies only to Application Load Balancers.

  • Target.Timeout - The health check requests timed out. Applies only to Application Load Balancers.

  • Target.FailedHealthChecks - The load balancer received an error while establishing a connection to the target or the target response was malformed.

  • Elb.InternalError - The health checks failed due to an internal error. Applies only to Application Load Balancers.

If the target state is unused, the reason code can be one of the following values:

  • Target.NotRegistered - The target is not registered with the target group.

  • Target.NotInUse - The target group is not used by any load balancer or the target is in an Availability Zone that is not enabled for its load balancer.

  • Target.InvalidState - The target is in the stopped or terminated state.

  • Target.IpUnusable - The target IP address is reserved for use by a load balancer.

If the target state is draining, the reason code can be the following value:

  • Target.DeregistrationInProgress - The target is in the process of being deregistered and the deregistration delay period has not expired.

If the target state is unavailable, the reason code can be the following value:

  • Target.HealthCheckDisabled - Health checks are disabled for the target group. Applies only to Application Load Balancers.

  • Elb.InternalError - Target health is unavailable due to an internal error. Applies only to Network Load Balancers.

" + "documentation":"

The reason code.

If the target state is healthy, a reason code is not provided.

If the target state is initial, the reason code can be one of the following values:

  • Elb.RegistrationInProgress - The target is in the process of being registered with the load balancer.

  • Elb.InitialHealthChecking - The load balancer is still sending the target the minimum number of health checks required to determine its health status.

If the target state is unhealthy, the reason code can be one of the following values:

  • Target.ResponseCodeMismatch - The health checks did not return an expected HTTP code. Applies only to Application Load Balancers and Gateway Load Balancers.

  • Target.Timeout - The health check requests timed out. Applies only to Application Load Balancers and Gateway Load Balancers.

  • Target.FailedHealthChecks - The load balancer received an error while establishing a connection to the target or the target response was malformed.

  • Elb.InternalError - The health checks failed due to an internal error. Applies only to Application Load Balancers.

If the target state is unused, the reason code can be one of the following values:

  • Target.NotRegistered - The target is not registered with the target group.

  • Target.NotInUse - The target group is not used by any load balancer or the target is in an Availability Zone that is not enabled for its load balancer.

  • Target.InvalidState - The target is in the stopped or terminated state.

  • Target.IpUnusable - The target IP address is reserved for use by a load balancer.

If the target state is draining, the reason code can be the following value:

  • Target.DeregistrationInProgress - The target is in the process of being deregistered and the deregistration delay period has not expired.

If the target state is unavailable, the reason code can be the following value:

  • Target.HealthCheckDisabled - Health checks are disabled for the target group. Applies only to Application Load Balancers.

  • Elb.InternalError - Target health is unavailable due to an internal error. Applies only to Network Load Balancers.

" }, "Description":{ "shape":"Description", @@ -3357,5 +3387,5 @@ "VpcId":{"type":"string"}, "ZoneName":{"type":"string"} }, - "documentation":"Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. This reference covers Application Load Balancers and Network Load Balancers.

An Application Load Balancer makes routing and load balancing decisions at the application layer (HTTP/HTTPS). A Network Load Balancer makes routing and load balancing decisions at the transport layer (TCP/TLS). Both Application Load Balancers and Network Load Balancers can route requests to one or more ports on each EC2 instance or container instance in your virtual private cloud (VPC). For more information, see the Elastic Load Balancing User Guide.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

" + "documentation":"Elastic Load Balancing

A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets.

Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, Gateway Load Balancers, and Classic Load Balancers. This reference covers the following load balancer types:

  • Application Load Balancer - Operates at the application layer (layer 7) and supports HTTP and HTTPS.

  • Network Load Balancer - Operates at the transport layer (layer 4) and supports TCP, TLS, and UDP.

  • Gateway Load Balancer - Operates at the network layer (layer 3).

For more information, see the Elastic Load Balancing User Guide.

All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds.

" } diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 625f3307a518..bb928d800ea7 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json b/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json index 6a5c91f1c0a3..2ca3f2e67f7b 100755 --- a/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json +++ b/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json @@ -25,6 +25,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "GetPackageVersionHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "GetUpgradeHistory": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/elasticsearch/src/main/resources/codegen-resources/service-2.json b/services/elasticsearch/src/main/resources/codegen-resources/service-2.json index 9342cdbd9a9d..d458ed43082d 100644 --- a/services/elasticsearch/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticsearch/src/main/resources/codegen-resources/service-2.json @@ -380,6 +380,23 @@ ], "documentation":"

Returns a list of upgrade compatible Elasticsearch versions. You can optionally pass a DomainName to get all upgrade compatible Elasticsearch versions for that specific domain.

" }, + "GetPackageVersionHistory":{ + "name":"GetPackageVersionHistory", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/packages/{PackageID}/history" + }, + "input":{"shape":"GetPackageVersionHistoryRequest"}, + "output":{"shape":"GetPackageVersionHistoryResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of versions of the package, along with their creation time and commit message.
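
Because GetPackageVersionHistory is also registered in paginators-1.json above, the generated client should expose a paginator for it. A minimal sketch (method names follow the v2 codegen conventions for the shapes in this model; the package ID is a placeholder):

    import software.amazon.awssdk.services.elasticsearch.ElasticsearchClient;
    import software.amazon.awssdk.services.elasticsearch.model.GetPackageVersionHistoryRequest;

    public class PackageVersionHistoryExample {
        public static void main(String[] args) {
            ElasticsearchClient es = ElasticsearchClient.create();

            // MaxResults caps the page size; the paginator follows NextToken across pages.
            GetPackageVersionHistoryRequest request = GetPackageVersionHistoryRequest.builder()
                    .packageID("pkg-id-placeholder")
                    .maxResults(10)
                    .build();

            es.getPackageVersionHistoryPaginator(request).forEach(page ->
                    page.packageVersionHistoryList().forEach(v ->
                            System.out.println(v.packageVersion() + " @ " + v.createdAt()
                                    + " - " + v.commitMessage())));
        }
    }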

" + }, "GetUpgradeHistory":{ "name":"GetUpgradeHistory", "http":{ @@ -589,6 +606,24 @@ ], "documentation":"

Modifies the cluster configuration of the specified Elasticsearch domain, such as the instance type and the number of instances.

" }, + "UpdatePackage":{ + "name":"UpdatePackage", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/packages/update" + }, + "input":{"shape":"UpdatePackageRequest"}, + "output":{"shape":"UpdatePackageResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates a package for use with Amazon ES domains.
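
A hedged sketch of calling the new UpdatePackage operation from the generated client. The PackageSource S3 members are assumed from the existing package shapes (not shown in this hunk), and the package ID and S3 location are placeholders:

    import software.amazon.awssdk.services.elasticsearch.ElasticsearchClient;
    import software.amazon.awssdk.services.elasticsearch.model.PackageSource;
    import software.amazon.awssdk.services.elasticsearch.model.UpdatePackageRequest;
    import software.amazon.awssdk.services.elasticsearch.model.UpdatePackageResponse;

    public class UpdatePackageExample {
        public static void main(String[] args) {
            ElasticsearchClient es = ElasticsearchClient.create();

            UpdatePackageResponse response = es.updatePackage(UpdatePackageRequest.builder()
                    .packageID("pkg-id-placeholder")
                    .packageSource(PackageSource.builder()      // new package content in S3
                            .s3BucketName("my-packages-bucket")
                            .s3Key("synonyms/synonyms-v2.txt")
                            .build())
                    .commitMessage("Add new synonyms")          // surfaced by GetPackageVersionHistory
                    .build());

            // AvailablePackageVersion is the new member added to PackageDetails in this change.
            System.out.println(response.packageDetails().availablePackageVersion());
        }
    }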

" + }, "UpgradeElasticsearchDomain":{ "name":"UpgradeElasticsearchDomain", "http":{ @@ -732,6 +767,10 @@ "InternalUserDatabaseEnabled":{ "shape":"Boolean", "documentation":"

True if the internal user database is enabled.

" + }, + "SAMLOptions":{ + "shape":"SAMLOptionsOutput", + "documentation":"

Describes the SAML application configured for a domain.

" } }, "documentation":"

Specifies the advanced security configuration: whether advanced security is enabled and whether the internal user database option is enabled.

" @@ -750,6 +789,10 @@ "MasterUserOptions":{ "shape":"MasterUserOptions", "documentation":"

Credentials for the master user: username and password, ARN, or both.

" + }, + "SAMLOptions":{ + "shape":"SAMLOptionsInput", + "documentation":"

Specifies the SAML application configuration for the domain.

" } }, "documentation":"

Specifies the advanced security configuration: whether advanced security is enabled, whether the internal database option is enabled, master username and password (if internal database is enabled), and master user ARN (if IAM is enabled).

" @@ -804,6 +847,11 @@ }, "documentation":"

Container for response returned by AssociatePackage operation.

" }, + "BackendRole":{ + "type":"string", + "max":256, + "min":1 + }, "BaseException":{ "type":"structure", "members":{ @@ -881,6 +929,10 @@ }, "documentation":"

Status of the Cognito options for the specified Elasticsearch domain.

" }, + "CommitMessage":{ + "type":"string", + "max":160 + }, "CompatibleElasticsearchVersionsList":{ "type":"list", "member":{"shape":"CompatibleVersionsMap"} @@ -1531,6 +1583,18 @@ "TLSSecurityPolicy":{ "shape":"TLSSecurityPolicy", "documentation":"

Specify the TLS security policy that needs to be applied to the HTTPS endpoint of Elasticsearch domain.
It can be one of the following values:

  • Policy-Min-TLS-1-0-2019-07: TLS security policy which supports TLSv1.0 and higher.
  • Policy-Min-TLS-1-2-2019-07: TLS security policy which supports only TLSv1.2

" + }, + "CustomEndpointEnabled":{ + "shape":"Boolean", + "documentation":"

Specify if custom endpoint should be enabled for the Elasticsearch domain.

" + }, + "CustomEndpoint":{ + "shape":"DomainNameFqdn", + "documentation":"

Specify the fully qualified domain for your custom endpoint.

" + }, + "CustomEndpointCertificateArn":{ + "shape":"ARN", + "documentation":"

Specify ACM certificate ARN for your custom endpoint.

" } }, "documentation":"

Options to configure the endpoint for the Elasticsearch domain.
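
A minimal sketch of the three new custom-endpoint members on DomainEndpointOptions; the domain name and certificate ARN are placeholders, and the built object would typically be supplied on a create- or update-domain-config request:

    import software.amazon.awssdk.services.elasticsearch.model.DomainEndpointOptions;

    public class CustomEndpointOptionsExample {
        public static void main(String[] args) {
            DomainEndpointOptions endpointOptions = DomainEndpointOptions.builder()
                    .customEndpointEnabled(true)
                    .customEndpoint("search.example.com")  // must match the DomainNameFqdn pattern
                    .customEndpointCertificateArn(
                            "arn:aws:acm:us-east-1:111122223333:certificate/placeholder")
                    .build();

            System.out.println(endpointOptions);
        }
    }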

" @@ -1589,6 +1653,12 @@ "min":3, "pattern":"[a-z][a-z0-9\\-]+" }, + "DomainNameFqdn":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(((?!-)[A-Za-z0-9-]{0,62}[A-Za-z0-9])\\.)+((?!-)[A-Za-z0-9-]{1,62}[A-Za-z0-9])$" + }, "DomainNameList":{ "type":"list", "member":{"shape":"DomainName"}, @@ -1621,6 +1691,7 @@ "shape":"DomainPackageStatus", "documentation":"

State of the association. Values are ASSOCIATING/ASSOCIATION_FAILED/ACTIVE/DISSOCIATING/DISSOCIATION_FAILED.

" }, + "PackageVersion":{"shape":"PackageVersion"}, "ReferencePath":{ "shape":"ReferencePath", "documentation":"

The relative path on Amazon ES nodes, which can be used as synonym_path when the package is a synonym file.

" @@ -2103,6 +2174,43 @@ }, "documentation":"

Container for response returned by GetCompatibleElasticsearchVersions operation.

" }, + "GetPackageVersionHistoryRequest":{ + "type":"structure", + "required":["PackageID"], + "members":{ + "PackageID":{ + "shape":"PackageID", + "documentation":"

The unique identifier of the package for which to return the version history.

", + "location":"uri", + "locationName":"PackageID" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Limits results to a maximum number of versions.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Used for pagination. Only necessary if a previous API call includes a non-null NextToken value. If provided, returns results for the next page.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

Container for request parameters to GetPackageVersionHistory operation.

" + }, + "GetPackageVersionHistoryResponse":{ + "type":"structure", + "members":{ + "PackageID":{"shape":"PackageID"}, + "PackageVersionHistoryList":{ + "shape":"PackageVersionHistoryList", + "documentation":"

List of PackageVersionHistory objects.

" + }, + "NextToken":{"shape":"String"} + }, + "documentation":"

Container for response returned by GetPackageVersionHistory operation.

" + }, "GetUpgradeHistoryRequest":{ "type":"structure", "required":["DomainName"], @@ -2737,6 +2845,8 @@ "shape":"CreatedAt", "documentation":"

The timestamp when the package was created.

" }, + "LastUpdatedAt":{"shape":"LastUpdated"}, + "AvailablePackageVersion":{"shape":"PackageVersion"}, "ErrorDetails":{ "shape":"ErrorDetails", "documentation":"

Additional information if the package is in an error state. Null otherwise.

" @@ -2786,6 +2896,29 @@ "type":"string", "enum":["TXT-DICTIONARY"] }, + "PackageVersion":{"type":"string"}, + "PackageVersionHistory":{ + "type":"structure", + "members":{ + "PackageVersion":{ + "shape":"PackageVersion", + "documentation":"

Version of the package.

" + }, + "CommitMessage":{ + "shape":"CommitMessage", + "documentation":"

A message associated with the version.

" + }, + "CreatedAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp when the package version was created.

" + } + }, + "documentation":"

Details of a package version.

" + }, + "PackageVersionHistoryList":{ + "type":"list", + "member":{"shape":"PackageVersionHistory"} + }, "Password":{ "type":"string", "min":8, @@ -3036,6 +3169,94 @@ "min":3 }, "S3Key":{"type":"string"}, + "SAMLEntityId":{ + "type":"string", + "max":512, + "min":8 + }, + "SAMLIdp":{ + "type":"structure", + "required":[ + "MetadataContent", + "EntityId" + ], + "members":{ + "MetadataContent":{ + "shape":"SAMLMetadata", + "documentation":"

The metadata of the SAML application, in XML format.

" + }, + "EntityId":{ + "shape":"SAMLEntityId", + "documentation":"

The unique Entity ID of the application in the SAML Identity Provider.

" + } + }, + "documentation":"

Specifies the SAML Identity Provider's information.

" + }, + "SAMLMetadata":{ + "type":"string", + "max":1048576, + "min":1 + }, + "SAMLOptionsInput":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

True if SAML is enabled.

" + }, + "Idp":{ + "shape":"SAMLIdp", + "documentation":"

Specifies the SAML Identity Provider's information.

" + }, + "MasterUserName":{ + "shape":"Username", + "documentation":"

The SAML master username, which is stored in the Amazon Elasticsearch Service domain's internal database.

" + }, + "MasterBackendRole":{ + "shape":"BackendRole", + "documentation":"

The backend role that the SAML master user is mapped to.

" + }, + "SubjectKey":{ + "shape":"String", + "documentation":"

The key to use for matching the SAML Subject attribute.

" + }, + "RolesKey":{ + "shape":"String", + "documentation":"

The key to use for matching the SAML Roles attribute.

" + }, + "SessionTimeoutMinutes":{ + "shape":"IntegerClass", + "documentation":"

The duration, in minutes, after which a user session becomes inactive. Acceptable values are between 1 and 1440, and the default value is 60.

" + } + }, + "documentation":"

Specifies the SAML application configuration for the domain.
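
A sketch of wiring the new SAML options into the advanced security input. Class and setter names are derived from the shape names above (for example, the SAMLOptions member is assumed to surface as samlOptions) and should be verified against the generated model classes; the IdP metadata, entity ID, and backend role are placeholders:

    import software.amazon.awssdk.services.elasticsearch.model.AdvancedSecurityOptionsInput;
    import software.amazon.awssdk.services.elasticsearch.model.SAMLIdp;
    import software.amazon.awssdk.services.elasticsearch.model.SAMLOptionsInput;

    public class SamlOptionsExample {
        public static void main(String[] args) {
            SAMLOptionsInput samlOptions = SAMLOptionsInput.builder()
                    .enabled(true)
                    .idp(SAMLIdp.builder()
                            .metadataContent("<EntityDescriptor>...</EntityDescriptor>")
                            .entityId("https://idp.example.com/saml")
                            .build())
                    .masterBackendRole("saml-admin-role")  // backend role mapped to the SAML master user
                    .rolesKey("Role")                      // SAML attribute carrying backend roles
                    .sessionTimeoutMinutes(60)             // default 60, range 1-1440
                    .build();

            AdvancedSecurityOptionsInput advancedSecurity = AdvancedSecurityOptionsInput.builder()
                    .enabled(true)
                    .samlOptions(samlOptions)
                    .build();

            System.out.println(advancedSecurity);
        }
    }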

" + }, + "SAMLOptionsOutput":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

True if SAML is enabled.

" + }, + "Idp":{ + "shape":"SAMLIdp", + "documentation":"

Describes the SAML Identity Provider's information.

" + }, + "SubjectKey":{ + "shape":"String", + "documentation":"

The key used for matching the SAML Subject attribute.

" + }, + "RolesKey":{ + "shape":"String", + "documentation":"

The key used for matching the SAML Roles attribute.

" + }, + "SessionTimeoutMinutes":{ + "shape":"IntegerClass", + "documentation":"

The duration, in minutes, after which a user session becomes inactive.

" + } + }, + "documentation":"

Describes the SAML application configured for the domain.

" + }, "ServiceSoftwareOptions":{ "type":"structure", "members":{ @@ -3285,6 +3506,39 @@ }, "documentation":"

The result of an UpdateElasticsearchDomain request. Contains the status of the Elasticsearch domain being updated.

" }, + "UpdatePackageRequest":{ + "type":"structure", + "required":[ + "PackageID", + "PackageSource" + ], + "members":{ + "PackageID":{ + "shape":"PackageID", + "documentation":"

Unique identifier for the package.

" + }, + "PackageSource":{"shape":"PackageSource"}, + "PackageDescription":{ + "shape":"PackageDescription", + "documentation":"

New description of the package.

" + }, + "CommitMessage":{ + "shape":"CommitMessage", + "documentation":"

An informational message for the new version, which is shown as part of GetPackageVersionHistoryResponse.

" + } + }, + "documentation":"

Container for request parameters to UpdatePackage operation.

" + }, + "UpdatePackageResponse":{ + "type":"structure", + "members":{ + "PackageDetails":{ + "shape":"PackageDetails", + "documentation":"

Information about the package, returned as a PackageDetails object.

" + } + }, + "documentation":"

Container for response returned by UpdatePackage operation.

" + }, "UpdateTimestamp":{"type":"timestamp"}, "UpgradeElasticsearchDomainRequest":{ "type":"structure", diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index 2ce82f4a999b..ea76eeffe71a 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index 8ceb2f3139ef..756e3ee2b2d7 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/paginators-1.json b/services/emr/src/main/resources/codegen-resources/paginators-1.json index 7d72c7c103b0..d5b5407b3464 100644 --- a/services/emr/src/main/resources/codegen-resources/paginators-1.json +++ b/services/emr/src/main/resources/codegen-resources/paginators-1.json @@ -42,6 +42,16 @@ "input_token": "Marker", "output_token": "Marker", "result_key": "Steps" + }, + "ListStudioSessionMappings": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "SessionMappings" + }, + "ListStudios": { + "input_token": "Marker", + "output_token": "Marker", + "result_key": "Studios" } } } \ No newline at end of file diff --git a/services/emr/src/main/resources/codegen-resources/service-2.json b/services/emr/src/main/resources/codegen-resources/service-2.json index e29a3289175a..692b62a9555a 100644 --- a/services/emr/src/main/resources/codegen-resources/service-2.json +++ b/services/emr/src/main/resources/codegen-resources/service-2.json @@ -79,7 +79,7 @@ {"shape":"InternalServerError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING state.

" + "documentation":"

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING state.
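
A hedged sketch of a CancelSteps call from the emr client (the response accessor names are assumed from the CancelStepsInfo shape; the cluster and step IDs are placeholders, and only PENDING steps can be canceled):

    import software.amazon.awssdk.services.emr.EmrClient;
    import software.amazon.awssdk.services.emr.model.CancelStepsRequest;
    import software.amazon.awssdk.services.emr.model.StepCancellationOption;

    public class CancelStepsExample {
        public static void main(String[] args) {
            EmrClient emr = EmrClient.create();

            emr.cancelSteps(CancelStepsRequest.builder()
                    .clusterId("j-EXAMPLECLUSTER")
                    .stepIds("s-EXAMPLESTEP1", "s-EXAMPLESTEP2")
                    .stepCancellationOption(StepCancellationOption.SEND_INTERRUPT)  // the default
                    .build())
               .cancelStepsInfoList()
               .forEach(info -> System.out.println(info.stepId() + ": " + info.status()));
        }
    }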

" }, "CreateSecurityConfiguration":{ "name":"CreateSecurityConfiguration", @@ -95,6 +95,33 @@ ], "documentation":"

Creates a security configuration, which is stored in the service and can be specified when a cluster is created.

" }, + "CreateStudio":{ + "name":"CreateStudio", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStudioInput"}, + "output":{"shape":"CreateStudioOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Creates a new Amazon EMR Studio.
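
A minimal sketch of a CreateStudio call using the required members listed in CreateStudioInput below; every ID, role name, and the S3 location is a placeholder:

    import software.amazon.awssdk.services.emr.EmrClient;
    import software.amazon.awssdk.services.emr.model.AuthMode;
    import software.amazon.awssdk.services.emr.model.CreateStudioRequest;
    import software.amazon.awssdk.services.emr.model.CreateStudioResponse;

    public class CreateStudioExample {
        public static void main(String[] args) {
            EmrClient emr = EmrClient.create();

            CreateStudioResponse studio = emr.createStudio(CreateStudioRequest.builder()
                    .name("example-studio")
                    .authMode(AuthMode.SSO)             // only SSO is supported at this point
                    .vpcId("vpc-0example")
                    .subnetIds("subnet-0example1", "subnet-0example2")
                    .serviceRole("EMRStudioServiceRole")
                    .userRole("EMRStudioUserRole")
                    .workspaceSecurityGroupId("sg-0workspace")
                    .engineSecurityGroupId("sg-0engine")
                    .defaultS3Location("s3://example-bucket/studio/")
                    .build());

            System.out.println(studio.studioId() + " -> " + studio.url());
        }
    }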

" + }, + "CreateStudioSessionMapping":{ + "name":"CreateStudioSessionMapping", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStudioSessionMappingInput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group.
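
A minimal sketch of mapping an SSO user to a Studio with a session policy; the Studio ID, user name, and policy ARN are placeholders:

    import software.amazon.awssdk.services.emr.EmrClient;
    import software.amazon.awssdk.services.emr.model.CreateStudioSessionMappingRequest;
    import software.amazon.awssdk.services.emr.model.IdentityType;

    public class CreateSessionMappingExample {
        public static void main(String[] args) {
            EmrClient emr = EmrClient.create();

            // Either IdentityId or IdentityName must be supplied, per the request shape.
            emr.createStudioSessionMapping(CreateStudioSessionMappingRequest.builder()
                    .studioId("es-EXAMPLESTUDIO")
                    .identityName("data-scientist@example.com")
                    .identityType(IdentityType.USER)
                    .sessionPolicyArn("arn:aws:iam::111122223333:policy/EMRStudioBasicPolicy")
                    .build());
        }
    }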

" + }, "DeleteSecurityConfiguration":{ "name":"DeleteSecurityConfiguration", "http":{ @@ -109,6 +136,32 @@ ], "documentation":"

Deletes a security configuration.

" }, + "DeleteStudio":{ + "name":"DeleteStudio", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStudioInput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Removes an Amazon EMR Studio from the Studio metadata store.

" + }, + "DeleteStudioSessionMapping":{ + "name":"DeleteStudioSessionMapping", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStudioSessionMappingInput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Removes a user or group from an Amazon EMR Studio.

" + }, "DescribeCluster":{ "name":"DescribeCluster", "http":{ @@ -134,7 +187,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

This API is deprecated and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.

DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.

Regardless of supplied parameters, only job flows created within the last two months are returned.

If no parameters are supplied, then job flows matching either of the following criteria are returned:

  • Job flows created and completed in the last two weeks

  • Job flows created within the last two months that are in one of the following states: RUNNING, WAITING, SHUTTING_DOWN, STARTING

Amazon EMR can return a maximum of 512 job flow descriptions.

", + "documentation":"

This API is no longer supported and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.

DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.

Regardless of supplied parameters, only job flows created within the last two months are returned.

If no parameters are supplied, then job flows matching either of the following criteria are returned:

  • Job flows created and completed in the last two weeks

  • Job flows created within the last two months that are in one of the following states: RUNNING, WAITING, SHUTTING_DOWN, STARTING

Amazon EMR can return a maximum of 512 job flow descriptions.

", "deprecated":true }, "DescribeNotebookExecution":{ @@ -179,6 +232,20 @@ ], "documentation":"

Provides more detail about the cluster step.

" }, + "DescribeStudio":{ + "name":"DescribeStudio", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStudioInput"}, + "output":{"shape":"DescribeStudioOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.

" + }, "GetBlockPublicAccessConfiguration":{ "name":"GetBlockPublicAccessConfiguration", "http":{ @@ -201,7 +268,21 @@ }, "input":{"shape":"GetManagedScalingPolicyInput"}, "output":{"shape":"GetManagedScalingPolicyOutput"}, - "documentation":"

Fetches the attached managed scaling policy for an Amazon EMR cluster.

" + "documentation":"

Fetches the attached managed scaling policy for an Amazon EMR cluster.

" + }, + "GetStudioSessionMapping":{ + "name":"GetStudioSessionMapping", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetStudioSessionMappingInput"}, + "output":{"shape":"GetStudioSessionMappingOutput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).

" }, "ListBootstrapActions":{ "name":"ListBootstrapActions", @@ -315,6 +396,34 @@ ], "documentation":"

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request or filter by StepStates. You can specify a maximum of ten stepIDs.

" }, + "ListStudioSessionMappings":{ + "name":"ListStudioSessionMappings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStudioSessionMappingsInput"}, + "output":{"shape":"ListStudioSessionMappingsOutput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns a list of all user or group session mappings for the EMR Studio specified by StudioId.

" + }, + "ListStudios":{ + "name":"ListStudios", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStudiosInput"}, + "output":{"shape":"ListStudiosOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.
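
Since ListStudios is registered in the EMR paginators-1.json above with the Marker token, the generated client should expose a paginator. A sketch (the StudioSummary accessors used here are assumptions drawn from the details the description mentions):

    import software.amazon.awssdk.services.emr.EmrClient;
    import software.amazon.awssdk.services.emr.model.ListStudiosRequest;

    public class ListStudiosExample {
        public static void main(String[] args) {
            EmrClient emr = EmrClient.create();

            // The paginator follows the Marker token, so all pages of Studio summaries are iterated.
            emr.listStudiosPaginator(ListStudiosRequest.builder().build())
               .forEach(page -> page.studios()
                       .forEach(s -> System.out.println(s.studioId() + " " + s.url())));
        }
    }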

" + }, "ModifyCluster":{ "name":"ModifyCluster", "http":{ @@ -386,7 +495,7 @@ }, "input":{"shape":"PutManagedScalingPolicyInput"}, "output":{"shape":"PutManagedScalingPolicyOutput"}, - "documentation":"

Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + "documentation":"

Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.
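
A hedged sketch of attaching a managed scaling policy built from the ComputeLimits members shown below. UnitType is required by the full ComputeLimits shape even though it is not part of this hunk, so the enum constant here is an assumption; the cluster ID is a placeholder:

    import software.amazon.awssdk.services.emr.EmrClient;
    import software.amazon.awssdk.services.emr.model.ComputeLimits;
    import software.amazon.awssdk.services.emr.model.ComputeLimitsUnitType;
    import software.amazon.awssdk.services.emr.model.ManagedScalingPolicy;
    import software.amazon.awssdk.services.emr.model.PutManagedScalingPolicyRequest;

    public class ManagedScalingExample {
        public static void main(String[] args) {
            EmrClient emr = EmrClient.create();

            // Limits only constrain core and task nodes; the master node is never scaled.
            emr.putManagedScalingPolicy(PutManagedScalingPolicyRequest.builder()
                    .clusterId("j-EXAMPLECLUSTER")
                    .managedScalingPolicy(ManagedScalingPolicy.builder()
                            .computeLimits(ComputeLimits.builder()
                                    .unitType(ComputeLimitsUnitType.INSTANCES)
                                    .minimumCapacityUnits(2)
                                    .maximumCapacityUnits(20)
                                    .maximumOnDemandCapacityUnits(10)  // On-Demand vs. Spot split
                                    .maximumCoreCapacityUnits(5)       // core vs. task split
                                    .build())
                            .build())
                    .build());
        }
    }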

" }, "RemoveAutoScalingPolicy":{ "name":"RemoveAutoScalingPolicy", @@ -497,6 +606,19 @@ {"shape":"InternalServerError"} ], "documentation":"

TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.

The maximum number of clusters allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.

" + }, + "UpdateStudioSessionMapping":{ + "name":"UpdateStudioSessionMapping", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStudioSessionMappingInput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to change.

Updates the session policy attached to the user or group for the specified Amazon EMR Studio.

" } }, "shapes":{ @@ -620,7 +742,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to associate with a cluster and propagate to EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

" + "documentation":"

A list of tags to associate with a cluster and propagate to EC2 instances. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

" } }, "documentation":"

This input identifies a cluster and a list of tags to attach.

" @@ -670,6 +792,13 @@ "max":2048, "min":20 }, + "AuthMode":{ + "type":"string", + "enum":[ + "SSO", + "IAM" + ] + }, "AutoScalingPolicy":{ "type":"structure", "required":[ @@ -759,7 +888,7 @@ "members":{ "BlockPublicSecurityGroupRules":{ "shape":"Boolean", - "documentation":"

Indicates whether EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true.

" + "documentation":"

Indicates whether Amazon EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true.

" }, "PermittedPublicSecurityGroupRuleRanges":{ "shape":"PortRanges", @@ -855,7 +984,7 @@ "members":{ "ClusterId":{ "shape":"XmlStringMaxLen256", - "documentation":"

The ClusterID for which specified steps will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.

" + "documentation":"

The ClusterID for the specified steps that will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.

" }, "StepIds":{ "shape":"StepIdsList", @@ -863,7 +992,7 @@ }, "StepCancellationOption":{ "shape":"StepCancellationOption", - "documentation":"

The option to choose for cancelling RUNNING steps. By default, the value is SEND_INTERRUPT.

" + "documentation":"

The option to choose to cancel RUNNING steps. By default, the value is SEND_INTERRUPT.

" } }, "documentation":"

The input argument to the CancelSteps operation.

" @@ -1022,7 +1151,7 @@ }, "ScaleDownBehavior":{ "shape":"ScaleDownBehavior", - "documentation":"

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" + "documentation":"

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" }, "CustomAmiId":{ "shape":"XmlStringMaxLen256", @@ -1030,7 +1159,7 @@ }, "EbsRootVolumeSize":{ "shape":"Integer", - "documentation":"

The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

" + "documentation":"

The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

" }, "RepoUpgradeOnBoot":{ "shape":"RepoUpgradeOnBoot", @@ -1038,7 +1167,7 @@ }, "KerberosAttributes":{ "shape":"KerberosAttributes", - "documentation":"

Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see Use Kerberos Authentication in the EMR Management Guide.

" + "documentation":"

Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see Use Kerberos Authentication in the Amazon EMR Management Guide.

" }, "ClusterArn":{ "shape":"ArnType", @@ -1164,7 +1293,7 @@ }, "ReadyDateTime":{ "shape":"Date", - "documentation":"

The date and time when the cluster was ready to execute steps.

" + "documentation":"

The date and time when the cluster was ready to run steps.

" }, "EndDateTime":{ "shape":"Date", @@ -1218,19 +1347,19 @@ }, "MinimumCapacityUnits":{ "shape":"Integer", - "documentation":"

The lower boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + "documentation":"

The lower boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" }, "MaximumCapacityUnits":{ "shape":"Integer", - "documentation":"

The upper boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + "documentation":"

The upper boundary of EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" }, "MaximumOnDemandCapacityUnits":{ "shape":"Integer", - "documentation":"

The upper boundary of On-Demand EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot instances.

" + "documentation":"

The upper boundary of On-Demand EC2 units. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot Instances.

" }, "MaximumCoreCapacityUnits":{ "shape":"Integer", - "documentation":"

The upper boundary of EC2 units for core node type in a cluster. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes.

" + "documentation":"

The upper boundary of EC2 units for core node type in a cluster. It is measured through vCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes.

" } }, "documentation":"

The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster can not be above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" @@ -1299,6 +1428,108 @@ } } }, + "CreateStudioInput":{ + "type":"structure", + "required":[ + "Name", + "AuthMode", + "VpcId", + "SubnetIds", + "ServiceRole", + "UserRole", + "WorkspaceSecurityGroupId", + "EngineSecurityGroupId" + ], + "members":{ + "Name":{ + "shape":"XmlStringMaxLen256", + "documentation":"

A descriptive name for the Amazon EMR Studio.

" + }, + "Description":{ + "shape":"XmlStringMaxLen256", + "documentation":"

A detailed description of the Studio.

" + }, + "AuthMode":{ + "shape":"AuthMode", + "documentation":"

Specifies whether the Studio authenticates users using single sign-on (SSO) or IAM. Amazon EMR Studio currently only supports SSO authentication.

" + }, + "VpcId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon Virtual Private Cloud (Amazon VPC) to associate with the Studio.

" + }, + "SubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

A list of subnet IDs to associate with the Studio. The subnets must belong to the VPC specified by VpcId. Studio users can create a Workspace in any of the specified subnets.

" + }, + "ServiceRole":{ + "shape":"XmlString", + "documentation":"

The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other AWS services.

" + }, + "UserRole":{ + "shape":"XmlString", + "documentation":"

The IAM user role that will be assumed by users and groups logged in to a Studio. The permissions attached to this IAM role can be scoped down for each user or group using session policies.

" + }, + "WorkspaceSecurityGroupId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio Workspace security group. The Workspace security group allows outbound network traffic to resources in the Engine security group, and it must be in the same VPC specified by VpcId.

" + }, + "EngineSecurityGroupId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio Engine security group. The Engine security group allows inbound network traffic from the Workspace security group, and it must be in the same VPC specified by VpcId.

" + }, + "DefaultS3Location":{ + "shape":"XmlString", + "documentation":"

The default Amazon S3 location to back up EMR Studio Workspaces and notebook files. A Studio user can select an alternative Amazon S3 location when creating a Workspace.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to associate with the Studio. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

" + } + } + }, + "CreateStudioOutput":{ + "type":"structure", + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + }, + "Url":{ + "shape":"XmlString", + "documentation":"

The unique Studio access URL.

" + } + } + }, + "CreateStudioSessionMappingInput":{ + "type":"structure", + "required":[ + "StudioId", + "IdentityType", + "SessionPolicyArn" + ], + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio to which the user or group will be mapped.

" + }, + "IdentityId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityName":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

Specifies whether the identity to map to the Studio is a user or a group.

" + }, + "SessionPolicyArn":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. Session policies refine Studio user permissions without the need to use multiple IAM user roles.

" + } + } + }, "Date":{"type":"timestamp"}, "DeleteSecurityConfigurationInput":{ "type":"structure", @@ -1315,6 +1546,41 @@ "members":{ } }, + "DeleteStudioInput":{ + "type":"structure", + "required":["StudioId"], + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + } + } + }, + "DeleteStudioSessionMappingInput":{ + "type":"structure", + "required":[ + "StudioId", + "IdentityType" + ], + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + }, + "IdentityId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityName":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the user or group to remove from the Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

Specifies whether the identity to delete from the Studio is a user or a group.

" + } + } + }, "DescribeClusterInput":{ "type":"structure", "required":["ClusterId"], @@ -1442,6 +1708,25 @@ }, "documentation":"

This output contains the description of the cluster step.

" }, + "DescribeStudioInput":{ + "type":"structure", + "required":["StudioId"], + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The Amazon EMR Studio ID.

" + } + } + }, + "DescribeStudioOutput":{ + "type":"structure", + "members":{ + "Studio":{ + "shape":"Studio", + "documentation":"

The Amazon EMR Studio details.

" + } + } + }, "EC2InstanceIdsList":{ "type":"list", "member":{"shape":"InstanceId"} @@ -1607,7 +1892,7 @@ }, "Message":{ "shape":"String", - "documentation":"

The descriptive message including the error the EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.

" + "documentation":"

The descriptive message including the error the Amazon EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.

" }, "LogFile":{ "shape":"String", @@ -1644,7 +1929,7 @@ "members":{ "ClusterId":{ "shape":"ClusterId", - "documentation":"

Specifies the ID of the cluster for which the managed scaling policy will be fetched.

" + "documentation":"

Specifies the ID of the cluster for which the managed scaling policy will be fetched.

" } } }, @@ -1653,7 +1938,41 @@ "members":{ "ManagedScalingPolicy":{ "shape":"ManagedScalingPolicy", - "documentation":"

Specifies the managed scaling policy that is attached to an Amazon EMR cluster.

" + "documentation":"

Specifies the managed scaling policy that is attached to an Amazon EMR cluster.

" + } + } + }, + "GetStudioSessionMappingInput":{ + "type":"structure", + "required":[ + "StudioId", + "IdentityType" + ], + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + }, + "IdentityId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityName":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

Specifies whether the identity to fetch is a user or a group.

" + } + } + }, + "GetStudioSessionMappingOutput":{ + "type":"structure", + "members":{ + "SessionMapping":{ + "shape":"SessionMappingDetail", + "documentation":"

The session mapping details for the specified Amazon EMR Studio and identity, including session policy ARN and creation time.

" } } }, @@ -1689,7 +2008,7 @@ }, "Properties":{ "shape":"StringMap", - "documentation":"

The list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.

" + "documentation":"

The list of Java properties that are set when the step runs. You can use these properties to pass key-value pairs to your main function.

" }, "MainClass":{ "shape":"String", @@ -1702,6 +2021,13 @@ }, "documentation":"

A cluster step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.

" }, + "IdentityType":{ + "type":"string", + "enum":[ + "USER", + "GROUP" + ] + }, "Instance":{ "type":"structure", "members":{ @@ -1784,7 +2110,7 @@ }, "TargetOnDemandCapacity":{ "shape":"WholeNumber", - "documentation":"

The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When an On-Demand instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedOnDemandCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.

If not specified or set to 0, only Spot instances are provisioned for the instance fleet using TargetSpotCapacity. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

" + "documentation":"

The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When an On-Demand Instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedOnDemandCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.

If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using TargetSpotCapacity. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

" }, "TargetSpotCapacity":{ "shape":"WholeNumber", @@ -1807,7 +2133,7 @@ "documentation":"

Describes the launch specification for an instance fleet.

" } }, - "documentation":"

Describes an instance fleet, which is a group of EC2 instances that host a particular node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of a mix of instance types and On-Demand and Spot instances, which are provisioned to meet a defined target capacity.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" + "documentation":"

Describes an instance fleet, which is a group of EC2 instances that host a particular node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of a mix of instance types and On-Demand and Spot Instances, which are provisioned to meet a defined target capacity.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" }, "InstanceFleetConfig":{ "type":"structure", @@ -1823,11 +2149,11 @@ }, "TargetOnDemandCapacity":{ "shape":"WholeNumber", - "documentation":"

The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When an On-Demand instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.

If not specified or set to 0, only Spot instances are provisioned for the instance fleet using TargetSpotCapacity. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

" + "documentation":"

The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand Instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When an On-Demand Instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.

If not specified or set to 0, only Spot Instances are provisioned for the instance fleet using TargetSpotCapacity. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

" }, "TargetSpotCapacity":{ "shape":"WholeNumber", - "documentation":"

The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When a Spot instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.

If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

" + "documentation":"

The target capacity of Spot units for the instance fleet, which determines how many Spot Instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot Instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity. When a Spot Instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.

If not specified or set to 0, only On-Demand Instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.

" }, "InstanceTypeConfigs":{ "shape":"InstanceTypeConfigList", @@ -1873,14 +2199,14 @@ "members":{ "SpotSpecification":{ "shape":"SpotProvisioningSpecification", - "documentation":"

The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

" + "documentation":"

The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

" }, "OnDemandSpecification":{ "shape":"OnDemandProvisioningSpecification", - "documentation":"

The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand instances allocation strategy is available in Amazon EMR version 5.12.1 and later.

" + "documentation":"

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.

" } }, - "documentation":"

The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot instance allocation strategies are available in Amazon EMR version 5.12.1 and later.

" + "documentation":"

The launch specification for Spot Instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot Instance allocation strategies are available in Amazon EMR version 5.12.1 and later.

" }, "InstanceFleetState":{ "type":"string", @@ -1982,7 +2308,7 @@ }, "BidPrice":{ "shape":"String", - "documentation":"

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" }, "InstanceType":{ "shape":"InstanceType", @@ -2057,7 +2383,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" }, "InstanceType":{ "shape":"InstanceType", @@ -2116,7 +2442,7 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" }, "InstanceType":{ "shape":"InstanceType", @@ -2176,7 +2502,7 @@ "members":{ "InstanceGroupId":{ "shape":"XmlStringMaxLen256", - "documentation":"

Unique ID of the instance group to expand or shrink.

" + "documentation":"

Unique ID of the instance group to modify.

" }, "InstanceCount":{ "shape":"Integer", @@ -2413,22 +2739,22 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" }, "BidPriceAsPercentageOfOnDemandPrice":{ "shape":"NonNegativeDouble", - "documentation":"

The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" + "documentation":"

The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

" }, "EbsConfiguration":{ "shape":"EbsConfiguration", - "documentation":"

The configuration of Amazon Elastic Block Storage (EBS) attached to each instance as defined by InstanceType.

" + "documentation":"

The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType.

" }, "Configurations":{ "shape":"ConfigurationList", "documentation":"

A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.

" } }, - "documentation":"

An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. There can be a maximum of 5 instance type configurations in a fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" + "documentation":"

An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. There can be a maximum of five instance type configurations in a fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" }, "InstanceTypeConfigList":{ "type":"list", @@ -2447,11 +2773,11 @@ }, "BidPrice":{ "shape":"XmlStringMaxLen256", - "documentation":"

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD.

" + "documentation":"

The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD.

" }, "BidPriceAsPercentageOfOnDemandPrice":{ "shape":"NonNegativeDouble", - "documentation":"

The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%).

" + "documentation":"

The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined by InstanceType. Expressed as a number (for example, 20 specifies 20%).

" }, "Configurations":{ "shape":"ConfigurationList", @@ -2459,7 +2785,7 @@ }, "EbsBlockDevices":{ "shape":"EbsBlockDeviceList", - "documentation":"

The configuration of Amazon Elastic Block Storage (EBS) attached to each instance as defined by InstanceType.

" + "documentation":"

The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType.

" }, "EbsOptimized":{ "shape":"BooleanObject", @@ -2554,7 +2880,7 @@ }, "SupportedProducts":{ "shape":"SupportedProductsList", - "documentation":"

A list of strings set by third party software when the job flow is launched. If you are not using third party software to manage the job flow this value is empty.

" + "documentation":"

A list of strings set by third-party software when the job flow is launched. If you are not using third-party software to manage the job flow, this value is empty.

" }, "VisibleToAllUsers":{ "shape":"Boolean", @@ -2566,7 +2892,7 @@ }, "ServiceRole":{ "shape":"XmlString", - "documentation":"

The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

" + "documentation":"

The IAM role that is assumed by the Amazon EMR service to access AWS resources on your behalf.

" }, "AutoScalingRole":{ "shape":"XmlString", @@ -2574,7 +2900,7 @@ }, "ScaleDownBehavior":{ "shape":"ScaleDownBehavior", - "documentation":"

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" + "documentation":"

The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" } }, "documentation":"

A description of a cluster (job flow).

" @@ -2660,7 +2986,7 @@ }, "Ec2KeyName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of the EC2 key pair that can be used to ssh to the master node as the user called \"hadoop.\"

" + "documentation":"

The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called \"hadoop.\"

" }, "Placement":{ "shape":"PlacementType", @@ -2676,7 +3002,7 @@ }, "HadoopVersion":{ "shape":"XmlStringMaxLen256", - "documentation":"

Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (deprecated), \"0.20\" (deprecated), \"0.20.205\" (deprecated), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

" + "documentation":"

Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (no longer maintained), \"0.20\" (no longer maintained), \"0.20.205\" (no longer maintained), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

" }, "Ec2SubnetId":{ "shape":"XmlStringMaxLen256", @@ -2707,7 +3033,7 @@ "documentation":"

A list of additional Amazon EC2 security group IDs for the core and task nodes.

" } }, - "documentation":"

A description of the Amazon EC2 instance on which the cluster (job flow) runs. A valid JobFlowInstancesConfig must contain either InstanceGroups or InstanceFleets, which is the recommended configuration. They cannot be used together. You may also have MasterInstanceType, SlaveInstanceType, and InstanceCount (all three must be present), but we don't recommend this configuration.

" + "documentation":"

A description of the Amazon EC2 instance on which the cluster (job flow) runs. A valid JobFlowInstancesConfig must contain either InstanceGroups or InstanceFleets. They cannot be used together. You may also have MasterInstanceType, SlaveInstanceType, and InstanceCount (all three must be present), but we don't recommend this configuration.

" }, "JobFlowInstancesDetail":{ "type":"structure", @@ -2743,11 +3069,11 @@ }, "NormalizedInstanceHours":{ "shape":"Integer", - "documentation":"

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour that an m1.small runs. Larger instances are weighted more, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

" + "documentation":"

An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased by four. This result is only an approximation and does not reflect the actual billing rate.

" }, "Ec2KeyName":{ "shape":"XmlStringMaxLen256", - "documentation":"

The name of an Amazon EC2 key pair that can be used to ssh to the master node.

" + "documentation":"

The name of an Amazon EC2 key pair that can be used to connect to the master node using SSH.

" }, "Ec2SubnetId":{ "shape":"XmlStringMaxLen256", @@ -2800,21 +3126,21 @@ "documentation":"

The Active Directory password for ADDomainJoinUser.

" } }, - "documentation":"

Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see Use Kerberos Authentication in the EMR Management Guide.

" + "documentation":"

Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information, see Use Kerberos Authentication in the Amazon EMR Management Guide.

" }, "KeyValue":{ "type":"structure", "members":{ "Key":{ "shape":"XmlString", - "documentation":"

The unique identifier of a key value pair.

" + "documentation":"

The unique identifier of a key-value pair.

" }, "Value":{ "shape":"XmlString", "documentation":"

The value part of the identified key.

" } }, - "documentation":"

A key value pair.

" + "documentation":"

A key-value pair.

" }, "KeyValueList":{ "type":"list", @@ -3087,6 +3413,58 @@ }, "documentation":"

This output contains the list of steps returned in reverse order. This means that the last step is the first element in the list.

" }, + "ListStudioSessionMappingsInput":{ + "type":"structure", + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + }, + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

Specifies whether to return session mappings for users or groups. If not specified, the results include session mapping details for both users and groups.

" + }, + "Marker":{ + "shape":"Marker", + "documentation":"

The pagination token that indicates the set of results to retrieve.

" + } + } + }, + "ListStudioSessionMappingsOutput":{ + "type":"structure", + "members":{ + "SessionMappings":{ + "shape":"SessionMappingSummaryList", + "documentation":"

A list of session mapping summary objects. Each object includes session mapping details such as creation time, identity type (user or group), and Studio ID.

" + }, + "Marker":{ + "shape":"Marker", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } + } + }, + "ListStudiosInput":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"Marker", + "documentation":"

The pagination token that indicates the set of results to retrieve.

" + } + } + }, + "ListStudiosOutput":{ + "type":"structure", + "members":{ + "Studios":{ + "shape":"StudioSummaryList", + "documentation":"

The list of Studio summary objects.

" + }, + "Marker":{ + "shape":"Marker", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } + } + }, "Long":{"type":"long"}, "ManagedScalingPolicy":{ "type":"structure", @@ -3239,7 +3617,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags associated with a notebook execution. Tags are user-defined key value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.

" + "documentation":"

A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.

" } }, "documentation":"

A notebook execution. An execution is a specific instance that an EMR Notebook is run using the StartNotebookExecution action.

" @@ -3303,10 +3681,10 @@ "members":{ "AllocationStrategy":{ "shape":"OnDemandProvisioningAllocationStrategy", - "documentation":"

Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.

" + "documentation":"

Specifies the strategy to use in launching On-Demand Instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.

" } }, - "documentation":"

The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand instances allocation strategy is available in Amazon EMR version 5.12.1 and later.

" + "documentation":"

The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.

" }, "OptionalArnType":{ "type":"string", @@ -3446,11 +3824,11 @@ "members":{ "ClusterId":{ "shape":"ClusterId", - "documentation":"

Specifies the ID of an EMR cluster where the managed scaling policy is attached.

" + "documentation":"

Specifies the ID of an EMR cluster where the managed scaling policy is attached.

" }, "ManagedScalingPolicy":{ "shape":"ManagedScalingPolicy", - "documentation":"

Specifies the constraints for the managed scaling policy.

" + "documentation":"

Specifies the constraints for the managed scaling policy.

" } } }, @@ -3545,7 +3923,7 @@ }, "LogEncryptionKmsKeyId":{ "shape":"XmlString", - "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs will remain encrypted by AES-256. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.

" }, "AdditionalInfo":{ "shape":"XmlString", @@ -3613,7 +3991,7 @@ }, "ScaleDownBehavior":{ "shape":"ScaleDownBehavior", - "documentation":"

Specifies the way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" + "documentation":"

Specifies the way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.

" }, "CustomAmiId":{ "shape":"XmlStringMaxLen256", @@ -3621,7 +3999,7 @@ }, "EbsRootVolumeSize":{ "shape":"Integer", - "documentation":"

The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

" + "documentation":"

The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

" }, "RepoUpgradeOnBoot":{ "shape":"RepoUpgradeOnBoot", @@ -3629,7 +4007,7 @@ }, "KerberosAttributes":{ "shape":"KerberosAttributes", - "documentation":"

Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see Use Kerberos Authentication in the EMR Management Guide.

" + "documentation":"

Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information, see Use Kerberos Authentication in the Amazon EMR Management Guide.

" }, "StepConcurrencyLevel":{ "shape":"Integer", @@ -3779,6 +4157,74 @@ "type":"list", "member":{"shape":"XmlStringMaxLen256"} }, + "SessionMappingDetail":{ + "type":"structure", + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + }, + "IdentityId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The globally unique identifier (GUID) of the user or group.

" + }, + "IdentityName":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" + }, + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

Specifies whether the identity mapped to the Studio is a user or a group.

" + }, + "SessionPolicyArn":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The Amazon Resource Name (ARN) of the session policy associated with the user or group.

" + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

The time the session mapping was created.

" + }, + "LastModifiedTime":{ + "shape":"Date", + "documentation":"

The time the session mapping was last modified.

" + } + }, + "documentation":"

Details for an Amazon EMR Studio session mapping, including creation time, user or group ID, Studio ID, and so on.

" + }, + "SessionMappingSummary":{ + "type":"structure", + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + }, + "IdentityId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store.

" + }, + "IdentityName":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

" + }, + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

Specifies whether the identity mapped to the Studio is a user or a group.

" + }, + "SessionPolicyArn":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The Amazon Resource Name (ARN) of the session policy associated with the user or group.

" + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

The time the session mapping was created.

" + } + }, + "documentation":"

Details for an Amazon EMR Studio session mapping. The details do not include the time the session mapping was last modified.

" + }, + "SessionMappingSummaryList":{ + "type":"list", + "member":{"shape":"SessionMappingSummary"} + }, "SetTerminationProtectionInput":{ "type":"structure", "required":[ @@ -3861,22 +4307,22 @@ "members":{ "TimeoutDurationMinutes":{ "shape":"WholeNumber", - "documentation":"

The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.

" + "documentation":"

The spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.

" }, "TimeoutAction":{ "shape":"SpotProvisioningTimeoutAction", - "documentation":"

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.

" + "documentation":"

The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot Instances could not be provisioned within the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies that if no Spot Instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.

" }, "BlockDurationMinutes":{ "shape":"WholeNumber", - "documentation":"

The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.

" + "documentation":"

The defined duration for Spot Instances (also known as Spot blocks) in minutes. When specified, the Spot Instance does not terminate before the defined duration expires, and defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot Instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and provides a Spot Instance termination notice, which gives the instance a two-minute warning before it terminates.

" }, "AllocationStrategy":{ "shape":"SpotProvisioningAllocationStrategy", - "documentation":"

Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.

" + "documentation":"

Specifies the strategy to use in launching Spot Instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot Instance pools with optimal capacity for the number of instances that are launching.

" } }, - "documentation":"

The launch specification for Spot instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. Spot instance allocation strategy is available in Amazon EMR version 5.12.1 and later.

" + "documentation":"

The launch specification for Spot Instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. Spot Instance allocation strategy is available in Amazon EMR version 5.12.1 and later.

" }, "SpotProvisioningTimeoutAction":{ "type":"string", @@ -3924,7 +4370,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags associated with a notebook execution. Tags are user-defined key value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.

" + "documentation":"

A list of tags associated with a notebook execution. Tags are user-defined key-value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.

" } } }, @@ -4199,6 +4645,110 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "Studio":{ + "type":"structure", + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the EMR Studio.

" + }, + "StudioArn":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The Amazon Resource Name (ARN) of the EMR Studio.

" + }, + "Name":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the EMR Studio.

" + }, + "Description":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The detailed description of the EMR Studio.

" + }, + "AuthMode":{ + "shape":"AuthMode", + "documentation":"

Specifies whether the Studio authenticates users using single sign-on (SSO) or IAM.

" + }, + "VpcId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the VPC associated with the EMR Studio.

" + }, + "SubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

The list of IDs of the subnets associated with the Amazon EMR Studio.

" + }, + "ServiceRole":{ + "shape":"XmlString", + "documentation":"

The name of the IAM role assumed by the Amazon EMR Studio.

" + }, + "UserRole":{ + "shape":"XmlString", + "documentation":"

The name of the IAM role assumed by users logged in to the Amazon EMR Studio.

" + }, + "WorkspaceSecurityGroupId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Workspace security group associated with the Amazon EMR Studio. The Workspace security group allows outbound network traffic to resources in the Engine security group and to the internet.

" + }, + "EngineSecurityGroupId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Engine security group associated with the Amazon EMR Studio. The Engine security group allows inbound network traffic from resources in the Workspace security group.

" + }, + "Url":{ + "shape":"XmlString", + "documentation":"

The unique access URL of the Amazon EMR Studio.

" + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

The time the Amazon EMR Studio was created.

" + }, + "DefaultS3Location":{ + "shape":"XmlString", + "documentation":"

The default Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags associated with the Amazon EMR Studio.

" + } + }, + "documentation":"

Details for an Amazon EMR Studio, including ID, creation time, name, and so on.

" + }, + "StudioSummary":{ + "type":"structure", + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Amazon EMR Studio.

" + }, + "Name":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the Amazon EMR Studio.

" + }, + "VpcId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the Virtual Private Cloud (Amazon VPC) associated with the Amazon EMR Studio.

" + }, + "Description":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The detailed description of the EMR Studio.

" + }, + "Url":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The unique access URL of the Amazon EMR Studio.

" + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

The time when the Amazon EMR Studio was created.

" + } + }, + "documentation":"

Details for an Amazon EMR Studio, including ID, Name, VPC, and Description. The details do not include subnets, IAM roles, security groups, or tags associated with the Studio.

" + }, + "StudioSummaryList":{ + "type":"list", + "member":{"shape":"StudioSummary"} + }, + "SubnetIdList":{ + "type":"list", + "member":{"shape":"String"} + }, "SupportedProductConfig":{ "type":"structure", "members":{ @@ -4229,7 +4779,7 @@ "documentation":"

A user-defined value, which is optional in a tag. For more information, see Tag Clusters.

" } }, - "documentation":"

A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

" + "documentation":"

A key-value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

" }, "TagList":{ "type":"list", @@ -4241,7 +4791,7 @@ "members":{ "JobFlowIds":{ "shape":"XmlStringList", - "documentation":"

A list of job flows to be shutdown.

" + "documentation":"

A list of job flows to be shut down.

" } }, "documentation":"

Input to the TerminateJobFlows operation.

" @@ -4278,6 +4828,36 @@ "COUNT_PER_SECOND" ] }, + "UpdateStudioSessionMappingInput":{ + "type":"structure", + "required":[ + "StudioId", + "IdentityType", + "SessionPolicyArn" + ], + "members":{ + "StudioId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The ID of the EMR Studio.

" + }, + "IdentityId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityName":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

" + }, + "IdentityType":{ + "shape":"IdentityType", + "documentation":"

Specifies whether the identity to update is a user or a group.

" + }, + "SessionPolicyArn":{ + "shape":"XmlStringMaxLen256", + "documentation":"

The Amazon Resource Name (ARN) of the session policy to associate with the specified user or group.

" + } + } + }, "VolumeSpecification":{ "type":"structure", "required":[ @@ -4325,5 +4905,5 @@ "member":{"shape":"XmlStringMaxLen256"} } }, - "documentation":"

Amazon EMR is a web service that makes it easy to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS products to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehousing.

" + "documentation":"

Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.

" } diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 2c1176541d46..666952feac82 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/service-2.json b/services/eventbridge/src/main/resources/codegen-resources/service-2.json index a70cd1c482ba..46d473a7b034 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/service-2.json +++ b/services/eventbridge/src/main/resources/codegen-resources/service-2.json @@ -28,6 +28,40 @@ ], "documentation":"

Activates a partner event source that has been deactivated. Once activated, your matching event bus will start receiving events from the event source.

" }, + "CancelReplay":{ + "name":"CancelReplay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelReplayRequest"}, + "output":{"shape":"CancelReplayResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"IllegalStatusException"}, + {"shape":"InternalException"} + ], + "documentation":"

Cancels the specified replay.

" + }, + "CreateArchive":{ + "name":"CreateArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateArchiveRequest"}, + "output":{"shape":"CreateArchiveResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidEventPatternException"} + ], + "documentation":"

Creates an archive of events with the specified settings. When you create an archive, incoming events might not immediately start being sent to the archive. Allow a short period of time for changes to take effect. If you do not specify a pattern to filter events sent to the archive, all events are sent to the archive except replayed events. Replayed events are not sent to an archive.

" + }, "CreateEventBus":{ "name":"CreateEventBus", "http":{ @@ -80,6 +114,21 @@ ], "documentation":"

You can use this operation to temporarily stop receiving events from the specified partner event source. The matching event bus is not deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it is deleted.

To activate a deactivated partner event source, use ActivateEventSource.

" }, + "DeleteArchive":{ + "name":"DeleteArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteArchiveRequest"}, + "output":{"shape":"DeleteArchiveResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Deletes the specified archive.

" + }, "DeleteEventBus":{ "name":"DeleteEventBus", "http":{ @@ -122,6 +171,21 @@ ], "documentation":"

Deletes the specified rule.

Before you can delete the rule, you must remove all targets, using RemoveTargets.

When you delete a rule, incoming events might continue to match to the deleted rule. Allow a short period of time for changes to take effect.

Managed rules are rules created and managed by another AWS service on your behalf. These rules are created by those other AWS services to support functionality in those services. You can delete these rules using the Force option, but you should do so only if you are sure the other service is not still using that rule.

" }, + "DescribeArchive":{ + "name":"DescribeArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeArchiveRequest"}, + "output":{"shape":"DescribeArchiveResponse"}, + "errors":[ + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves details about an archive.

" + }, "DescribeEventBus":{ "name":"DescribeEventBus", "http":{ @@ -166,6 +230,20 @@ ], "documentation":"

A SaaS partner can use this operation to list details about a partner event source that they have created. AWS customers do not use this operation. Instead, AWS customers can use DescribeEventSource to see details about a partner event source that is shared with them.

" }, + "DescribeReplay":{ + "name":"DescribeReplay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplayRequest"}, + "output":{"shape":"DescribeReplayResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Retrieves details about a replay. Use DescribeReplay to determine the progress of a running replay. A replay processes events to replay based on the time in the event, and replays them using 1-minute intervals. If you use StartReplay and specify an EventStartTime and an EventEndTime that covers a 20-minute time range, the events are replayed from the first minute of that 20-minute range first. Then the events from the second minute are replayed. The value returned for EventLastReplayedTime indicates the time within the specified time range associated with the last event replayed.

" + }, "DescribeRule":{ "name":"DescribeRule", "http":{ @@ -210,6 +288,20 @@ ], "documentation":"

Enables the specified rule. If the rule does not exist, the operation fails.

When you enable a rule, incoming events might not immediately start matching to a newly enabled rule. Allow a short period of time for changes to take effect.

" }, + "ListArchives":{ + "name":"ListArchives", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArchivesRequest"}, + "output":{"shape":"ListArchivesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"} + ], + "documentation":"

Lists your archives. You can either list all the archives or you can provide a prefix to match to the archive names. Filter parameters are exclusive.

" + }, "ListEventBuses":{ "name":"ListEventBuses", "http":{ @@ -266,6 +358,19 @@ ], "documentation":"

A SaaS partner can use this operation to list all the partner event source names that they have created. This operation is not used by AWS customers.

" }, + "ListReplays":{ + "name":"ListReplays", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReplaysRequest"}, + "output":{"shape":"ListReplaysResponse"}, + "errors":[ + {"shape":"InternalException"} + ], + "documentation":"

Lists your replays. You can either list all the replays or you can provide a prefix to match to the replay names. Filter parameters are exclusive.

" + }, "ListRuleNamesByTarget":{ "name":"ListRuleNamesByTarget", "http":{ @@ -360,7 +465,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"PolicyLengthExceededException"}, {"shape":"InternalException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving at an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

" }, @@ -409,7 +515,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InternalException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Revokes the permission of another AWS account to be able to put events to the specified event bus. Specify the account to revoke by the StatementId value that you associated with the account when you granted it permission with PutPermission. You can find the StatementId by using DescribeEventBus.

" }, @@ -429,6 +536,23 @@ ], "documentation":"

Removes the specified targets from the specified rule. When the rule is triggered, those targets are no longer invoked.

When you remove a target, removed targets might continue to be invoked when the associated rule triggers. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" }, + "StartReplay":{ + "name":"StartReplay", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplayRequest"}, + "output":{"shape":"StartReplayResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"InvalidEventPatternException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalException"} + ], + "documentation":"

Starts the specified replay. Events are not necessarily replayed in the exact same order that they were added to the archive. A replay processes events to replay based on the time in the event, and replays them using 1-minute intervals. If you specify an EventStartTime and an EventEndTime that covers a 20-minute time range, the events are replayed from the first minute of that 20-minute range first. Then the events from the second minute are replayed. You can use DescribeReplay to determine the progress of a replay. The value returned for EventLastReplayedTime indicates the time within the specified time range associated with the last event replayed.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -474,6 +598,23 @@ {"shape":"ManagedRuleException"} ], "documentation":"

Removes one or more tags from the specified EventBridge resource. In Amazon EventBridge (CloudWatch Events), rules and event buses can be tagged.

" + }, + "UpdateArchive":{ + "name":"UpdateArchive", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateArchiveRequest"}, + "output":{"shape":"UpdateArchiveResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidEventPatternException"} + ], + "documentation":"

Updates the specified archive.

" } }, "shapes":{ @@ -499,6 +640,81 @@ } } }, + "Archive":{ + "type":"structure", + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event bus associated with the archive. Only events from this event bus are sent to the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The current state of the archive.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

A description for the reason that the archive is in the current state.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days to retain events in the archive before they are deleted.

" + }, + "SizeBytes":{ + "shape":"Long", + "documentation":"

The size of the archive, in bytes.

" + }, + "EventCount":{ + "shape":"Long", + "documentation":"

The number of events in the archive.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time stamp for the time that the archive was created.

" + } + }, + "documentation":"

An Archive object that contains details about an archive.

" + }, + "ArchiveArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:.+\\/.+$" + }, + "ArchiveDescription":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "ArchiveName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "ArchiveResponseList":{ + "type":"list", + "member":{"shape":"Archive"} + }, + "ArchiveState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "CREATING", + "UPDATING", + "CREATE_FAILED", + "UPDATE_FAILED" + ] + }, + "ArchiveStateReason":{ + "type":"string", + "max":512, + "pattern":".*" + }, "Arn":{ "type":"string", "max":1600, @@ -577,11 +793,38 @@ "documentation":"

The retry strategy to use for failed jobs, if the target is an AWS Batch job. If you specify a retry strategy here, it overrides the retry strategy defined in the job definition.

" }, "Boolean":{"type":"boolean"}, + "CancelReplayRequest":{ + "type":"structure", + "required":["ReplayName"], + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay to cancel.

" + } + } + }, + "CancelReplayResponse":{ + "type":"structure", + "members":{ + "ReplayArn":{ + "shape":"ReplayArn", + "documentation":"

The ARN of the replay to cancel.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The current state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

The reason that the replay is in the current state.

" + } + } + }, "ConcurrentModificationException":{ "type":"structure", "members":{ }, - "documentation":"

There is concurrent modification on a rule or target.

", + "documentation":"

There is concurrent modification on a rule, target, archive, or replay.

", "exception":true }, "Condition":{ @@ -607,6 +850,56 @@ }, "documentation":"

A JSON string which you can use to limit the event bus permissions you are granting to only accounts that fulfill the condition. Currently, the only supported condition is membership in a certain AWS organization. The string must contain Type, Key, and Value fields. The Value field specifies the ID of the AWS organization. Following is an example value for Condition:

'{\"Type\" : \"StringEquals\", \"Key\": \"aws:PrincipalOrgID\", \"Value\": \"o-1234567890\"}'

" }, + "CreateArchiveRequest":{ + "type":"structure", + "required":[ + "ArchiveName", + "EventSourceArn" + ], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name for the archive to create.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the archive.

" + }, + "Description":{ + "shape":"ArchiveDescription", + "documentation":"

A description for the archive.

" + }, + "EventPattern":{ + "shape":"EventPattern", + "documentation":"

An event pattern to use to filter events sent to the archive.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days to retain events for. The default value is 0. If set to 0, events are retained indefinitely.

" + } + } + }, + "CreateArchiveResponse":{ + "type":"structure", + "members":{ + "ArchiveArn":{ + "shape":"ArchiveArn", + "documentation":"

The ARN of the archive that was created.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive that was created.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

The reason that the archive is in the state.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the archive was created.

" + } + } + }, "CreateEventBusRequest":{ "type":"structure", "required":["Name"], @@ -660,6 +953,11 @@ } } }, + "CreatedBy":{ + "type":"string", + "max":128, + "min":1 + }, "Database":{ "type":"string", "max":64, @@ -692,6 +990,21 @@ }, "documentation":"

A DeadLetterConfig object that contains information about a dead-letter queue configuration.

" }, + "DeleteArchiveRequest":{ + "type":"structure", + "required":["ArchiveName"], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive to delete.

" + } + } + }, + "DeleteArchiveResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEventBusRequest":{ "type":"structure", "required":["Name"], @@ -728,8 +1041,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Force":{ "shape":"Boolean", @@ -737,12 +1050,71 @@ } } }, + "DescribeArchiveRequest":{ + "type":"structure", + "required":["ArchiveName"], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive to retrieve.

" + } + } + }, + "DescribeArchiveResponse":{ + "type":"structure", + "members":{ + "ArchiveArn":{ + "shape":"ArchiveArn", + "documentation":"

The ARN of the archive.

" + }, + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the archive.

" + }, + "Description":{ + "shape":"ArchiveDescription", + "documentation":"

The description of the archive.

" + }, + "EventPattern":{ + "shape":"EventPattern", + "documentation":"

The event pattern used to filter events sent to the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

The reason that the archive is in the state.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days that events are retained in the archive.

" + }, + "SizeBytes":{ + "shape":"Long", + "documentation":"

The size of the archive in bytes.

" + }, + "EventCount":{ + "shape":"Long", + "documentation":"

The number of events in the archive.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the archive was created.

" + } + } + }, "DescribeEventBusRequest":{ "type":"structure", "members":{ "Name":{ - "shape":"EventBusName", - "documentation":"

The name of the event bus to show details for. If you omit this, the default event bus is displayed.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to show details for. If you omit this, the default event bus is displayed.

" } } }, @@ -825,6 +1197,69 @@ } } }, + "DescribeReplayRequest":{ + "type":"structure", + "required":["ReplayName"], + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay to retrieve.

" + } + } + }, + "DescribeReplayResponse":{ + "type":"structure", + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay.

" + }, + "ReplayArn":{ + "shape":"ReplayArn", + "documentation":"

The ARN of the replay.

" + }, + "Description":{ + "shape":"ReplayDescription", + "documentation":"

The description of the replay.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The current state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

The reason that the replay is in the current state.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the archive from which events were replayed.

" + }, + "Destination":{ + "shape":"ReplayDestination", + "documentation":"

A ReplayDestination object that contains details about the replay.

" + }, + "EventStartTime":{ + "shape":"Timestamp", + "documentation":"

The time stamp of the first event that was last replayed from the archive.

" + }, + "EventEndTime":{ + "shape":"Timestamp", + "documentation":"

The time stamp for the last event that was replayed from the archive.

" + }, + "EventLastReplayedTime":{ + "shape":"Timestamp", + "documentation":"

The time that the event was last replayed.

" + }, + "ReplayStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay started.

" + }, + "ReplayEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay stopped.

" + } + } + }, "DescribeRuleRequest":{ "type":"structure", "required":["Name"], @@ -834,8 +1269,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } } }, @@ -876,7 +1311,11 @@ }, "EventBusName":{ "shape":"EventBusName", - "documentation":"

The event bus associated with the rule.

" + "documentation":"

The name of the event bus associated with the rule.

" + }, + "CreatedBy":{ + "shape":"CreatedBy", + "documentation":"

The account ID of the user that created the rule. If you use PutRule to put a rule on an event bus in another account, the other account is the owner of the rule, and the rule ARN includes the account ID for that account. However, the value for CreatedBy is the account ID of the account that created the rule in the other account.

" } } }, @@ -889,8 +1328,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } } }, @@ -934,8 +1373,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } } }, @@ -969,6 +1408,12 @@ "min":1, "pattern":"[/\\.\\-_A-Za-z0-9]+" }, + "EventBusNameOrArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[/\\.\\-_A-Za-z0-9]+" + }, "EventId":{"type":"string"}, "EventPattern":{"type":"string"}, "EventResource":{"type":"string"}, @@ -1064,6 +1509,13 @@ }, "documentation":"

These are custom parameters to be used when the target is an API Gateway REST API.

" }, + "IllegalStatusException":{ + "type":"structure", + "members":{ + }, + "documentation":"

An error occurred because a replay can be canceled only when the state is Running or Starting.

", + "exception":true + }, "InputTransformer":{ "type":"structure", "required":["InputTemplate"], @@ -1130,7 +1582,7 @@ "type":"structure", "members":{ }, - "documentation":"

You tried to create more rules or add more targets to a rule than is allowed.

", + "documentation":"

The request failed because it attempted to create a resource beyond the allowed service quota.

", "exception":true }, "LimitMax100":{ @@ -1142,6 +1594,44 @@ "type":"integer", "min":1 }, + "ListArchivesRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{ + "shape":"ArchiveName", + "documentation":"

A name prefix to filter the archives returned. Only archives with names that match the prefix are returned.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + }, + "Limit":{ + "shape":"LimitMax100", + "documentation":"

The maximum number of results to return.

" + } + } + }, + "ListArchivesResponse":{ + "type":"structure", + "members":{ + "Archives":{ + "shape":"ArchiveResponseList", + "documentation":"

An array of Archive objects that include details about an archive.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + } + } + }, "ListEventBusesRequest":{ "type":"structure", "members":{ @@ -1264,6 +1754,44 @@ } } }, + "ListReplaysRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{ + "shape":"ReplayName", + "documentation":"

A name prefix to filter the replays returned. Only replays with names that match the prefix are returned.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The state of the replay.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the event source associated with the replay.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + }, + "Limit":{ + "shape":"LimitMax100", + "documentation":"

The maximum number of replays to retrieve.

" + } + } + }, + "ListReplaysResponse":{ + "type":"structure", + "members":{ + "Replays":{ + "shape":"ReplayList", + "documentation":"

An array of Replay objects that contain information about the replay.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to retrieve the next set of results.

" + } + } + }, "ListRuleNamesByTargetRequest":{ "type":"structure", "required":["TargetArn"], @@ -1273,8 +1801,8 @@ "documentation":"

The Amazon Resource Name (ARN) of the target resource.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

Limits the results to show only the rules associated with the specified event bus.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to list rules for. If you omit this, the default event bus is used.

" }, "NextToken":{ "shape":"NextToken", @@ -1307,8 +1835,8 @@ "documentation":"

The prefix matching the rule name.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

Limits the results to show only the rules associated with the specified event bus.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to list the rules for. If you omit this, the default event bus is used.

" }, "NextToken":{ "shape":"NextToken", @@ -1361,8 +1889,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "NextToken":{ "shape":"NextToken", @@ -1387,6 +1915,7 @@ } } }, + "Long":{"type":"long"}, "ManagedBy":{ "type":"string", "max":128, @@ -1431,6 +1960,12 @@ "min":1, "pattern":"[\\.\\-_A-Za-z0-9]+" }, + "NonPartnerEventBusNameOrArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[\\.\\-_A-Za-z0-9]+" + }, "OperationDisabledException":{ "type":"structure", "members":{ @@ -1543,8 +2078,8 @@ "documentation":"

A valid JSON string. There is no other schema imposed. The JSON string may contain fields and nested subobjects.

" }, "EventBusName":{ - "shape":"NonPartnerEventBusName", - "documentation":"

The event bus that will receive the event. Only the rules that are associated with this event bus will be able to match the event.

" + "shape":"NonPartnerEventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to receive the event. Only the rules that are associated with this event bus are used to match the event. If you omit this, the default event bus is used.

" } }, "documentation":"

Represents an event to be submitted.
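Because EventBusName on a PutEvents entry now takes either a bus name or a full event bus ARN (the NonPartnerEventBusNameOrArn shape above), a minimal AWS SDK for Java v2 sketch of sending an event to a non-default bus could look like the following. The bus ARN, source, detail type, and detail payload are hypothetical placeholders, and the snippet assumes the generated client follows the usual SDK v2 builder conventions.

    import software.amazon.awssdk.services.eventbridge.EventBridgeClient;
    import software.amazon.awssdk.services.eventbridge.model.PutEventsRequest;
    import software.amazon.awssdk.services.eventbridge.model.PutEventsRequestEntry;

    public class PutEventsSketch {
        public static void main(String[] args) {
            EventBridgeClient events = EventBridgeClient.create();

            // EventBusName accepts a name ("my-bus") or a full event bus ARN.
            PutEventsRequestEntry entry = PutEventsRequestEntry.builder()
                    .eventBusName("arn:aws:events:us-east-1:123456789012:event-bus/my-bus") // hypothetical ARN
                    .source("com.example.orders")                                           // hypothetical source
                    .detailType("OrderPlaced")
                    .detail("{\"orderId\":\"1234\"}")
                    .build();

            events.putEvents(PutEventsRequest.builder().entries(entry).build());
        }
    }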

" @@ -1669,15 +2204,10 @@ }, "PutPermissionRequest":{ "type":"structure", - "required":[ - "Action", - "Principal", - "StatementId" - ], "members":{ "EventBusName":{ "shape":"NonPartnerEventBusName", - "documentation":"

The event bus associated with the rule. If you omit this, the default event bus is used.

" + "documentation":"

The name of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Action":{ "shape":"Action", @@ -1694,6 +2224,10 @@ "Condition":{ "shape":"Condition", "documentation":"

This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain AWS organization. For more information about AWS Organizations, see What Is AWS Organizations in the AWS Organizations User Guide.

If you specify Condition with an AWS organization ID, and specify \"*\" as the value for Principal, you grant permission to all the accounts in the named organization.

The Condition is a JSON string which must contain Type, Key, and Value fields.

" + }, + "Policy":{ + "shape":"String", + "documentation":"

A JSON string that describes the permission policy statement. You can include a Policy parameter in the request instead of using the StatementId, Action, Principal, or Condition parameters.
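As a rough illustration of the Policy-based form, the following SDK for Java v2 sketch grants another account permission to put events by passing a complete policy document instead of the individual StatementId, Action, Principal, and Condition fields. The account IDs, Sid, and event bus ARN are hypothetical, and the policy JSON is only a sketch of a typical event bus resource policy.

    import software.amazon.awssdk.services.eventbridge.EventBridgeClient;
    import software.amazon.awssdk.services.eventbridge.model.PutPermissionRequest;

    public class PutPermissionSketch {
        public static void main(String[] args) {
            EventBridgeClient events = EventBridgeClient.create();

            // Hypothetical resource policy allowing account 111122223333 to call PutEvents on the default bus.
            String policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{"
                    + "\"Sid\":\"AllowPartnerAccount\","
                    + "\"Effect\":\"Allow\","
                    + "\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},"
                    + "\"Action\":\"events:PutEvents\","
                    + "\"Resource\":\"arn:aws:events:us-east-1:123456789012:event-bus/default\"}]}";

            events.putPermission(PutPermissionRequest.builder()
                    .eventBusName("default")
                    .policy(policy)
                    .build());
        }
    }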

" } } }, @@ -1730,8 +2264,8 @@ "documentation":"

The list of key-value pairs to associate with the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The event bus to associate with this rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus to associate with this rule. If you omit this, the default event bus is used.

" } } }, @@ -1756,8 +2290,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The name of the event bus associated with the rule. If you omit this, the default event bus is used.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Targets":{ "shape":"TargetList", @@ -1857,12 +2391,15 @@ }, "RemovePermissionRequest":{ "type":"structure", - "required":["StatementId"], "members":{ "StatementId":{ "shape":"StatementId", "documentation":"

The statement ID corresponding to the account that is no longer allowed to put events to the default event bus.

" }, + "RemoveAllPermissions":{ + "shape":"Boolean", + "documentation":"

Specifies whether to remove all permissions.

" + }, "EventBusName":{ "shape":"NonPartnerEventBusName", "documentation":"

The name of the event bus to revoke permissions for. If you omit this, the default event bus is used.

" @@ -1881,8 +2418,8 @@ "documentation":"

The name of the rule.

" }, "EventBusName":{ - "shape":"EventBusName", - "documentation":"

The name of the event bus associated with the rule.

" + "shape":"EventBusNameOrArn", + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" }, "Ids":{ "shape":"TargetIdList", @@ -1929,6 +2466,104 @@ "type":"list", "member":{"shape":"RemoveTargetsResultEntry"} }, + "Replay":{ + "type":"structure", + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the archive to replay events from.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The current state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

A description of why the replay is in the current state.

" + }, + "EventStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to start replaying events. This is determined by the time in the event as described in Time.

" + }, + "EventEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to stop replaying events. Any event with a creation time prior to the EventEndTime specified is replayed.

" + }, + "EventLastReplayedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the last event was replayed.

" + }, + "ReplayStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay started.

" + }, + "ReplayEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time that the replay completed.

" + } + }, + "documentation":"

A Replay object that contains details about a replay.

" + }, + "ReplayArn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"^arn:aws([a-z]|\\-)*:events:([a-z]|\\d|\\-)*:([0-9]{12})?:.+\\/[\\.\\-_A-Za-z0-9]+$" + }, + "ReplayDescription":{ + "type":"string", + "max":512, + "pattern":".*" + }, + "ReplayDestination":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The ARN of the event bus to replay events to. You can replay events only to the event bus that was specified when the archive was created.

" + }, + "FilterArns":{ + "shape":"ReplayDestinationFilters", + "documentation":"

A list of ARNs for rules to replay events to.

" + } + }, + "documentation":"

A ReplayDestination object that contains details about the destination for a replay.

" + }, + "ReplayDestinationFilters":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "ReplayList":{ + "type":"list", + "member":{"shape":"Replay"} + }, + "ReplayName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\.\\-_A-Za-z0-9]+" + }, + "ReplayState":{ + "type":"string", + "enum":[ + "STARTING", + "RUNNING", + "CANCELLING", + "COMPLETED", + "CANCELLED", + "FAILED" + ] + }, + "ReplayStateReason":{ + "type":"string", + "max":512, + "pattern":".*" + }, "ResourceAlreadyExistsException":{ "type":"structure", "members":{ @@ -1948,6 +2583,10 @@ "documentation":"

An entity that you specified does not exist.

", "exception":true }, + "RetentionDays":{ + "type":"integer", + "min":0 + }, "RetryPolicy":{ "type":"structure", "members":{ @@ -2004,7 +2643,7 @@ }, "EventBusName":{ "shape":"EventBusName", - "documentation":"

The event bus associated with the rule.

" + "documentation":"

The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used.

" } }, "documentation":"

Contains information about a rule in Amazon EventBridge.

" @@ -2110,6 +2749,63 @@ }, "documentation":"

This structure includes the custom parameter to be used when the target is an SQS FIFO queue.

" }, + "StartReplayRequest":{ + "type":"structure", + "required":[ + "ReplayName", + "EventSourceArn", + "EventStartTime", + "EventEndTime", + "Destination" + ], + "members":{ + "ReplayName":{ + "shape":"ReplayName", + "documentation":"

The name of the replay to start.

" + }, + "Description":{ + "shape":"ReplayDescription", + "documentation":"

A description for the replay to start.

" + }, + "EventSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the archive to replay events from.

" + }, + "EventStartTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to start replaying events. Only events that occurred between the EventStartTime and EventEndTime are replayed.

" + }, + "EventEndTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp for the time to stop replaying events. Only events that occurred between the EventStartTime and EventEndTime are replayed.

" + }, + "Destination":{ + "shape":"ReplayDestination", + "documentation":"

A ReplayDestination object that includes details about the destination for the replay.

" + } + } + }, + "StartReplayResponse":{ + "type":"structure", + "members":{ + "ReplayArn":{ + "shape":"ReplayArn", + "documentation":"

The ARN of the replay.

" + }, + "State":{ + "shape":"ReplayState", + "documentation":"

The state of the replay.

" + }, + "StateReason":{ + "shape":"ReplayStateReason", + "documentation":"

The reason that the replay is in the state.

" + }, + "ReplayStartTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the replay started.
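Putting the StartReplay request and response shapes together, a minimal SDK for Java v2 sketch that replays the last hour of events from an archive back to the default bus could look like this. The replay name and the archive and event bus ARNs are hypothetical, and the snippet assumes the usual generated builder and getter naming.

    import java.time.Instant;
    import java.time.temporal.ChronoUnit;
    import software.amazon.awssdk.services.eventbridge.EventBridgeClient;
    import software.amazon.awssdk.services.eventbridge.model.ReplayDestination;
    import software.amazon.awssdk.services.eventbridge.model.StartReplayRequest;
    import software.amazon.awssdk.services.eventbridge.model.StartReplayResponse;

    public class StartReplaySketch {
        public static void main(String[] args) {
            EventBridgeClient events = EventBridgeClient.create();
            Instant end = Instant.now();
            Instant start = end.minus(1, ChronoUnit.HOURS);

            StartReplayResponse response = events.startReplay(StartReplayRequest.builder()
                    .replayName("orders-replay-1")                                                  // hypothetical name
                    .eventSourceArn("arn:aws:events:us-east-1:123456789012:archive/orders-archive") // hypothetical archive
                    .eventStartTime(start)
                    .eventEndTime(end)
                    .destination(ReplayDestination.builder()
                            .arn("arn:aws:events:us-east-1:123456789012:event-bus/default")
                            .build())
                    .build());

            System.out.println(response.replayArn() + " is " + response.state());
        }
    }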

" + } + } + }, "StatementId":{ "type":"string", "max":64, @@ -2348,6 +3044,49 @@ "type":"structure", "members":{ } + }, + "UpdateArchiveRequest":{ + "type":"structure", + "required":["ArchiveName"], + "members":{ + "ArchiveName":{ + "shape":"ArchiveName", + "documentation":"

The name of the archive to update.

" + }, + "Description":{ + "shape":"ArchiveDescription", + "documentation":"

The description for the archive.

" + }, + "EventPattern":{ + "shape":"EventPattern", + "documentation":"

The event pattern to use to filter events sent to the archive.

" + }, + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days to retain events in the archive.

" + } + } + }, + "UpdateArchiveResponse":{ + "type":"structure", + "members":{ + "ArchiveArn":{ + "shape":"ArchiveArn", + "documentation":"

The ARN of the archive.

" + }, + "State":{ + "shape":"ArchiveState", + "documentation":"

The state of the archive.

" + }, + "StateReason":{ + "shape":"ArchiveStateReason", + "documentation":"

The reason that the archive is in the current state.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the archive was created.
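For completeness, a hedged SDK for Java v2 sketch of updating an archive's description, filter pattern, and retention follows; the archive name and event pattern are hypothetical.

    import software.amazon.awssdk.services.eventbridge.EventBridgeClient;
    import software.amazon.awssdk.services.eventbridge.model.UpdateArchiveRequest;
    import software.amazon.awssdk.services.eventbridge.model.UpdateArchiveResponse;

    public class UpdateArchiveSketch {
        public static void main(String[] args) {
            EventBridgeClient events = EventBridgeClient.create();

            UpdateArchiveResponse updated = events.updateArchive(UpdateArchiveRequest.builder()
                    .archiveName("orders-archive")                             // hypothetical archive
                    .description("Order events, retained for 30 days")
                    .eventPattern("{\"source\":[\"com.example.orders\"]}")     // hypothetical pattern
                    .retentionDays(30)
                    .build());

            System.out.println(updated.archiveArn() + " is " + updated.state());
        }
    }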

" + } + } } }, "documentation":"

Amazon EventBridge helps you to respond to state changes in your AWS resources. When your resources change state, they automatically send events into an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to:

  • Automatically invoke an AWS Lambda function to update DNS entries when an event notifies you that an Amazon EC2 instance enters the running state.

  • Direct specific API records from AWS CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks.

  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume.

For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide.

" diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 989af713fa35..739df82d74e5 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/fms/pom.xml b/services/fms/pom.xml index 092805faaafa..cf3fd1a5519b 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/fms/src/main/resources/codegen-resources/service-2.json b/services/fms/src/main/resources/codegen-resources/service-2.json index f1630d447702..d24c217a1445 100644 --- a/services/fms/src/main/resources/codegen-resources/service-2.json +++ b/services/fms/src/main/resources/codegen-resources/service-2.json @@ -66,7 +66,9 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidOperationException"}, - {"shape":"InternalErrorException"} + {"shape":"InternalErrorException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"} ], "documentation":"

Permanently deletes an AWS Firewall Manager policy.

" }, @@ -142,7 +144,7 @@ {"shape":"InvalidInputException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

Returns detailed compliance information about the specified member account. Details include resources that are in and out of compliance with the specified policy. Resources are considered noncompliant for AWS WAF and Shield Advanced policies if the specified policy has not been applied to them. Resources are considered noncompliant for security group policies if they are in scope of the policy, they violate one or more of the policy rules, and remediation is disabled or not possible.

" + "documentation":"

Returns detailed compliance information about the specified member account. Details include resources that are in and out of compliance with the specified policy. Resources are considered noncompliant for AWS WAF and Shield Advanced policies if the specified policy has not been applied to them. Resources are considered noncompliant for security group policies if they are in scope of the policy, they violate one or more of the policy rules, and remediation is disabled or not possible. Resources are considered noncompliant for Network Firewall policies if a firewall is missing in the VPC, if the firewall endpoint isn't set up in an expected Availability Zone and subnet, if a subnet created by the Firewall Manager doesn't have the expected route table, and for modifications to a firewall policy that violate the Firewall Manager policy's rules.

" }, "GetNotificationChannel":{ "name":"GetNotificationChannel", @@ -340,7 +342,7 @@ {"shape":"InvalidOperationException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Designates the IAM role and Amazon Simple Notification Service (SNS) topic that AWS Firewall Manager uses to record SNS logs.

" + "documentation":"

Designates the IAM role and Amazon Simple Notification Service (SNS) topic that AWS Firewall Manager uses to record SNS logs.

To perform this action outside of the console, you must configure the SNS topic to allow the Firewall Manager role AWSServiceRoleForFMS to publish SNS logs. For more information, see Firewall Manager required permissions for API actions in the AWS Firewall Manager Developer Guide.

" }, "PutPolicy":{ "name":"PutPolicy", @@ -358,7 +360,7 @@ {"shape":"InternalErrorException"}, {"shape":"InvalidTypeException"} ], - "documentation":"

Creates an AWS Firewall Manager policy.

Firewall Manager provides the following types of policies:

  • A Shield Advanced policy, which applies Shield Advanced protection to specified accounts and resources

  • An AWS WAF policy (type WAFV2), which defines rule groups to run first in the corresponding AWS WAF web ACL and rule groups to run last in the web ACL.

  • An AWS WAF Classic policy (type WAF), which defines a rule group.

  • A security group policy, which manages VPC security groups across your AWS organization.

Each policy is specific to one of the types. If you want to enforce more than one policy type across accounts, create multiple policies. You can create multiple policies for each type.

You must be subscribed to Shield Advanced to create a Shield Advanced policy. For more information about subscribing to Shield Advanced, see CreateSubscription.

" + "documentation":"

Creates an AWS Firewall Manager policy.

Firewall Manager provides the following types of policies:

  • An AWS WAF policy (type WAFV2), which defines rule groups to run first in the corresponding AWS WAF web ACL and rule groups to run last in the web ACL.

  • An AWS WAF Classic policy (type WAF), which defines a rule group.

  • A Shield Advanced policy, which applies Shield Advanced protection to specified accounts and resources.

  • A security group policy, which manages VPC security groups across your AWS organization.

  • An AWS Network Firewall policy, which provides firewall rules to filter network traffic in specified Amazon VPCs.

Each policy is specific to one of the types. If you want to enforce more than one policy type across accounts, create multiple policies. You can create multiple policies for each type.

You must be subscribed to Shield Advanced to create a Shield Advanced policy. For more information about subscribing to Shield Advanced, see CreateSubscription.

" }, "PutProtocolsList":{ "name":"PutProtocolsList", @@ -603,7 +605,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The resource type. This is in the format shown in the AWS Resource Types Reference. For example: AWS::ElasticLoadBalancingV2::LoadBalancer or AWS::CloudFront::Distribution.

" + "documentation":"

The resource type. This is in the format shown in the AWS Resource Types Reference. For example: AWS::ElasticLoadBalancingV2::LoadBalancer, AWS::CloudFront::Distribution, or AWS::NetworkFirewall::FirewallPolicy.

" } }, "documentation":"

Details of the resource that is not protected by the policy.

" @@ -686,7 +688,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=,+\\-@]*)$" }, "DisassociateAdminAccountRequest":{ "type":"structure", @@ -929,7 +931,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The resource type. This is in the format shown in the AWS Resource Types Reference. Supported resource types are: AWS::EC2::Instance, AWS::EC2::NetworkInterface, or AWS::EC2::SecurityGroup.

" + "documentation":"

The resource type. This is in the format shown in the AWS Resource Types Reference. Supported resource types are: AWS::EC2::Instance, AWS::EC2::NetworkInterface, AWS::EC2::SecurityGroup, AWS::NetworkFirewall::FirewallPolicy, and AWS::EC2::Subnet.

" } } }, @@ -1177,6 +1179,136 @@ "type":"list", "member":{"shape":"AWSAccountId"} }, + "NetworkFirewallAction":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9]+$" + }, + "NetworkFirewallActionList":{ + "type":"list", + "member":{"shape":"NetworkFirewallAction"} + }, + "NetworkFirewallMissingExpectedRTViolation":{ + "type":"structure", + "members":{ + "ViolationTarget":{ + "shape":"ViolationTarget", + "documentation":"

The ID of the AWS Network Firewall or VPC resource that's in violation.

" + }, + "VPC":{ + "shape":"ResourceId", + "documentation":"

The resource ID of the VPC associated with a violating subnet.

" + }, + "AvailabilityZone":{ + "shape":"LengthBoundedString", + "documentation":"

The Availability Zone of a violating subnet.

" + }, + "CurrentRouteTable":{ + "shape":"ResourceId", + "documentation":"

The resource ID of the current route table that's associated with the subnet, if one is available.

" + }, + "ExpectedRouteTable":{ + "shape":"ResourceId", + "documentation":"

The resource ID of the route table that should be associated with the subnet.

" + } + }, + "documentation":"

Violation details for AWS Network Firewall for a subnet that's not associated to the expected Firewall Manager managed route table.

" + }, + "NetworkFirewallMissingFirewallViolation":{ + "type":"structure", + "members":{ + "ViolationTarget":{ + "shape":"ViolationTarget", + "documentation":"

The ID of the AWS Network Firewall or VPC resource that's in violation.

" + }, + "VPC":{ + "shape":"ResourceId", + "documentation":"

The resource ID of the VPC associated with a violating subnet.

" + }, + "AvailabilityZone":{ + "shape":"LengthBoundedString", + "documentation":"

The Availability Zone of a violating subnet.

" + }, + "TargetViolationReason":{ + "shape":"TargetViolationReason", + "documentation":"

The reason the resource has this violation, if one is available.

" + } + }, + "documentation":"

Violation details for AWS Network Firewall for a subnet that doesn't have a Firewall Manager managed firewall in its VPC.

" + }, + "NetworkFirewallMissingSubnetViolation":{ + "type":"structure", + "members":{ + "ViolationTarget":{ + "shape":"ViolationTarget", + "documentation":"

The ID of the AWS Network Firewall or VPC resource that's in violation.

" + }, + "VPC":{ + "shape":"ResourceId", + "documentation":"

The resource ID of the VPC associated with a violating subnet.

" + }, + "AvailabilityZone":{ + "shape":"LengthBoundedString", + "documentation":"

The Availability Zone of a violating subnet.

" + }, + "TargetViolationReason":{ + "shape":"TargetViolationReason", + "documentation":"

The reason the resource has this violation, if one is available.

" + } + }, + "documentation":"

Violation details for AWS Network Firewall for an Availability Zone that's missing the expected Firewall Manager managed subnet.

" + }, + "NetworkFirewallPolicyDescription":{ + "type":"structure", + "members":{ + "StatelessRuleGroups":{ + "shape":"StatelessRuleGroupList", + "documentation":"

The stateless rule groups that are used in the Network Firewall firewall policy.

" + }, + "StatelessDefaultActions":{ + "shape":"NetworkFirewallActionList", + "documentation":"

The actions to take on packets that don't match any of the stateless rule groups.

" + }, + "StatelessFragmentDefaultActions":{ + "shape":"NetworkFirewallActionList", + "documentation":"

The actions to take on packet fragments that don't match any of the stateless rule groups.

" + }, + "StatelessCustomActions":{ + "shape":"NetworkFirewallActionList", + "documentation":"

Names of custom actions that are available for use in the stateless default actions settings.

" + }, + "StatefulRuleGroups":{ + "shape":"StatefulRuleGroupList", + "documentation":"

The stateful rule groups that are used in the Network Firewall firewall policy.

" + } + }, + "documentation":"

The definition of the AWS Network Firewall firewall policy.

" + }, + "NetworkFirewallPolicyModifiedViolation":{ + "type":"structure", + "members":{ + "ViolationTarget":{ + "shape":"ViolationTarget", + "documentation":"

The ID of the AWS Network Firewall or VPC resource that's in violation.

" + }, + "CurrentPolicyDescription":{ + "shape":"NetworkFirewallPolicyDescription", + "documentation":"

The policy that's currently in use in the individual account.

" + }, + "ExpectedPolicyDescription":{ + "shape":"NetworkFirewallPolicyDescription", + "documentation":"

The policy that should be in use in the individual account in order to be compliant.

" + } + }, + "documentation":"

Violation details for AWS Network Firewall for a firewall policy that has a different NetworkFirewallPolicyDescription than is required by the Firewall Manager policy.

" + }, + "NetworkFirewallResourceName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, "PaginationMaxResults":{ "type":"integer", "max":100, @@ -1234,7 +1366,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource protected by or in scope of the policy. This is in the format shown in the AWS Resource Types Reference. For AWS WAF and Shield Advanced, examples include AWS::ElasticLoadBalancingV2::LoadBalancer and AWS::CloudFront::Distribution. For a security group common policy, valid values are AWS::EC2::NetworkInterface and AWS::EC2::Instance. For a security group content audit policy, valid values are AWS::EC2::SecurityGroup, AWS::EC2::NetworkInterface, and AWS::EC2::Instance. For a security group usage audit policy, the value is AWS::EC2::SecurityGroup.

" + "documentation":"

The type of resource protected by or in scope of the policy. This is in the format shown in the AWS Resource Types Reference. For AWS WAF and Shield Advanced, examples include AWS::ElasticLoadBalancingV2::LoadBalancer and AWS::CloudFront::Distribution. For a security group common policy, valid values are AWS::EC2::NetworkInterface and AWS::EC2::Instance. For a security group content audit policy, valid values are AWS::EC2::SecurityGroup, AWS::EC2::NetworkInterface, and AWS::EC2::Instance. For a security group usage audit policy, the value is AWS::EC2::SecurityGroup. For an AWS Network Firewall policy, the value is AWS::EC2::VPC.

" }, "ResourceTypeList":{ "shape":"ResourceTypeList", @@ -1365,7 +1497,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource protected by or in scope of the policy. This is in the format shown in the AWS Resource Types Reference. For AWS WAF and Shield Advanced, examples include AWS::ElasticLoadBalancingV2::LoadBalancer and AWS::CloudFront::Distribution. For a security group common policy, valid values are AWS::EC2::NetworkInterface and AWS::EC2::Instance. For a security group content audit policy, valid values are AWS::EC2::SecurityGroup, AWS::EC2::NetworkInterface, and AWS::EC2::Instance. For a security group usage audit policy, the value is AWS::EC2::SecurityGroup.

" + "documentation":"

The type of resource protected by or in scope of the policy. This is in the format shown in the AWS Resource Types Reference. For AWS WAF and Shield Advanced, examples include AWS::ElasticLoadBalancingV2::LoadBalancer and AWS::CloudFront::Distribution. For a security group common policy, valid values are AWS::EC2::NetworkInterface and AWS::EC2::Instance. For a security group content audit policy, valid values are AWS::EC2::SecurityGroup, AWS::EC2::NetworkInterface, and AWS::EC2::Instance. For a security group usage audit policy, the value is AWS::EC2::SecurityGroup. For an AWS Network Firewall policy, the value is AWS::EC2::VPC.

" }, "SecurityServiceType":{ "shape":"SecurityServiceType", @@ -1681,6 +1813,22 @@ "AwsEc2InstanceViolation":{ "shape":"AwsEc2InstanceViolation", "documentation":"

Violation details for an EC2 instance.

" + }, + "NetworkFirewallMissingFirewallViolation":{ + "shape":"NetworkFirewallMissingFirewallViolation", + "documentation":"

Violation detail for an AWS Network Firewall policy that indicates that a subnet has no Firewall Manager managed firewall in its VPC.

" + }, + "NetworkFirewallMissingSubnetViolation":{ + "shape":"NetworkFirewallMissingSubnetViolation", + "documentation":"

Violation detail for an AWS Network Firewall policy that indicates that an Availability Zone is missing the expected Firewall Manager managed subnet.

" + }, + "NetworkFirewallMissingExpectedRTViolation":{ + "shape":"NetworkFirewallMissingExpectedRTViolation", + "documentation":"

Violation detail for an AWS Network Firewall policy that indicates that a subnet is not associated with the expected Firewall Manager managed route table.

" + }, + "NetworkFirewallPolicyModifiedViolation":{ + "shape":"NetworkFirewallPolicyModifiedViolation", + "documentation":"

Violation detail for an AWS Network Firewall policy that indicates that a firewall policy in an individual account has been modified in a way that makes it noncompliant. For example, the individual account owner might have deleted a rule group, changed the priority of a stateless rule group, or changed a policy default action.
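To make the new violation members concrete, here is a rough SDK for Java v2 sketch that fetches violation details for a VPC and inspects the Network Firewall specific fields. The policy ID, member account, and resource ID are hypothetical placeholders, and the request members are assumed from the GetViolationDetails API.

    import software.amazon.awssdk.services.fms.FmsClient;
    import software.amazon.awssdk.services.fms.model.GetViolationDetailsRequest;
    import software.amazon.awssdk.services.fms.model.ResourceViolation;

    public class NetworkFirewallViolationsSketch {
        public static void main(String[] args) {
            FmsClient fms = FmsClient.create();

            fms.getViolationDetails(GetViolationDetailsRequest.builder()
                    .policyId("a1b2c3d4-5678-90ab-cdef-EXAMPLE11111")   // hypothetical policy ID
                    .memberAccount("111122223333")                      // hypothetical member account
                    .resourceId("vpc-0123456789abcdef0")                // hypothetical VPC
                    .resourceType("AWS::EC2::VPC")
                    .build())
               .violationDetail()
               .resourceViolations()
               .forEach(NetworkFirewallViolationsSketch::report);
        }

        private static void report(ResourceViolation v) {
            if (v.networkFirewallMissingFirewallViolation() != null) {
                System.out.println("Missing firewall: " + v.networkFirewallMissingFirewallViolation().violationTarget());
            }
            if (v.networkFirewallPolicyModifiedViolation() != null) {
                System.out.println("Policy modified: " + v.networkFirewallPolicyModifiedViolation().violationTarget());
            }
        }
    }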

" } }, "documentation":"

Violation detail based on resource type.

" @@ -1755,7 +1903,7 @@ }, "ManagedServiceData":{ "shape":"ManagedServiceData", - "documentation":"

Details about the service that are specific to the service type, in JSON format. For service type SHIELD_ADVANCED, this is an empty string.

  • Example: WAFV2

    \"ManagedServiceData\": \"{\\\"type\\\":\\\"WAFV2\\\",\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"preProcessRuleGroups\\\":[{\\\"managedRuleGroupIdentifier\\\":null,\\\"ruleGroupArn\\\":\\\"rulegrouparn\\\",\\\"overrideAction\\\":{\\\"type\\\":\\\"COUNT\\\"},\\\"excludeRules\\\":[{\\\"name\\\":\\\"EntityName\\\"}],\\\"ruleGroupType\\\":\\\"RuleGroup\\\"}],\\\"postProcessRuleGroups\\\":[{\\\"managedRuleGroupIdentifier\\\":{\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesAdminProtectionRuleSet\\\",\\\"vendorName\\\":\\\"AWS\\\"},\\\"ruleGroupArn\\\":\\\"rulegrouparn\\\",\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"excludeRules\\\":[],\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\"}],\\\"overrideCustomerWebACLAssociation\\\":false}\"

  • Example: WAF Classic

    \"ManagedServiceData\": \"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\": \\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}

  • Example: SECURITY_GROUPS_COMMON

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_COMMON\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"},\"RemediationEnabled\":false,\"ResourceType\":\"AWS::EC2::NetworkInterface\"}

  • Example: SECURITY_GROUPS_CONTENT_AUDIT

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_CONTENT_AUDIT\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd \\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"},\"RemediationEnabled\":false,\"ResourceType\":\"AWS::EC2::NetworkInterface\"}

    The security group action for content audit can be ALLOW or DENY. For ALLOW, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.

  • Example: SECURITY_GROUPS_USAGE_AUDIT

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_USAGE_AUDIT\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"},\"RemediationEnabled\":false,\"Resou rceType\":\"AWS::EC2::SecurityGroup\"}

" + "documentation":"

Details about the service that are specific to the service type, in JSON format. For service type SHIELD_ADVANCED, this is an empty string.

  • Example: NETWORK_FIREWALL

    \"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-west-1:1234567891011:stateless-rulegroup/rulegroup2\\\",\\\"priority\\\":10}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:pass\\\",\\\"custom1\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"custom2\\\",\\\"aws:pass\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"custom1\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"dimension1\\\"}]}}},{\\\"actionName\\\":\\\"custom2\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"dimension2\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-west-1:1234567891011:stateful-rulegroup/rulegroup1\\\"}],\\\"networkFirewallOrchestrationConfig\\\":{\\\"singleFirewallEndpointPerVPC\\\":true,\\\"allowedIPV4CidrList\\\":[\\\"10.24.34.0/28\\\"]} }\"

  • Example: WAFV2

    \"{\\\"type\\\":\\\"WAFV2\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupArn\\\":null,\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"managedRuleGroupIdentifier\\\":{\\\"version\\\":null,\\\"vendorName\\\":\\\"AWS\\\",\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesAmazonIpReputationList\\\"},\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\",\\\"excludeRules\\\":[]}],\\\"postProcessRuleGroups\\\":[],\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"overrideCustomerWebACLAssociation\\\":false,\\\"loggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[\\\"arn:aws:firehose:us-west-2:12345678912:deliverystream/aws-waf-logs-fms-admin-destination\\\"],\\\"redactedFields\\\":[{\\\"redactedFieldType\\\":\\\"SingleHeader\\\",\\\"redactedFieldValue\\\":\\\"Cookies\\\"},{\\\"redactedFieldType\\\":\\\"Method\\\"}]}}\"

    In the loggingConfiguration, you can specify one logDestinationConfigs and optionally provide up to 20 redactedFields; the RedactedFieldType must be one of URI, QUERY_STRING, HEADER, or METHOD.

  • Example: WAF Classic

    \"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\":\\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}\"

  • Example: SECURITY_GROUPS_COMMON

    \"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"

  • Example: SECURITY_GROUPS_CONTENT_AUDIT

    \"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\"sg-000e55995d61a06bd\\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"

    The security group action for content audit can be ALLOW or DENY. For ALLOW, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.

  • Example: SECURITY_GROUPS_USAGE_AUDIT

    \"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"

" } }, "documentation":"

Details about the security service that is being used to protect the resources.
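A minimal SDK for Java v2 sketch of supplying ManagedServiceData, using the short SECURITY_GROUPS_USAGE_AUDIT form from the examples above; the policy name, resource type, and remediation settings are hypothetical, and the required Policy members are assumed from the PutPolicy API.

    import software.amazon.awssdk.services.fms.FmsClient;
    import software.amazon.awssdk.services.fms.model.Policy;
    import software.amazon.awssdk.services.fms.model.PutPolicyRequest;
    import software.amazon.awssdk.services.fms.model.SecurityServicePolicyData;
    import software.amazon.awssdk.services.fms.model.SecurityServiceType;

    public class PutPolicySketch {
        public static void main(String[] args) {
            FmsClient fms = FmsClient.create();

            // ManagedServiceData is a JSON string whose shape depends on the policy type.
            String managedServiceData = "{\"type\":\"SECURITY_GROUPS_USAGE_AUDIT\","
                    + "\"deleteUnusedSecurityGroups\":true,"
                    + "\"coalesceRedundantSecurityGroups\":true}";

            fms.putPolicy(PutPolicyRequest.builder()
                    .policy(Policy.builder()
                            .policyName("sg-usage-audit")                       // hypothetical policy name
                            .securityServicePolicyData(SecurityServicePolicyData.builder()
                                    .type(SecurityServiceType.SECURITY_GROUPS_USAGE_AUDIT)
                                    .managedServiceData(managedServiceData)
                                    .build())
                            .resourceType("AWS::EC2::SecurityGroup")
                            .excludeResourceTags(false)
                            .remediationEnabled(false)
                            .build())
                    .build());
        }
    }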

" @@ -1768,9 +1916,55 @@ "SHIELD_ADVANCED", "SECURITY_GROUPS_COMMON", "SECURITY_GROUPS_CONTENT_AUDIT", - "SECURITY_GROUPS_USAGE_AUDIT" + "SECURITY_GROUPS_USAGE_AUDIT", + "NETWORK_FIREWALL" ] }, + "StatefulRuleGroup":{ + "type":"structure", + "members":{ + "RuleGroupName":{ + "shape":"NetworkFirewallResourceName", + "documentation":"

The name of the rule group.

" + }, + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

The resource ID of the rule group.

" + } + }, + "documentation":"

AWS Network Firewall stateful rule group, used in a NetworkFirewallPolicyDescription.

" + }, + "StatefulRuleGroupList":{ + "type":"list", + "member":{"shape":"StatefulRuleGroup"} + }, + "StatelessRuleGroup":{ + "type":"structure", + "members":{ + "RuleGroupName":{ + "shape":"NetworkFirewallResourceName", + "documentation":"

The name of the rule group.

" + }, + "ResourceId":{ + "shape":"ResourceId", + "documentation":"

The resource ID of the rule group.

" + }, + "Priority":{ + "shape":"StatelessRuleGroupPriority", + "documentation":"

The priority of the rule group. AWS Network Firewall evaluates the stateless rule groups in a firewall policy starting from the lowest priority setting.

" + } + }, + "documentation":"

AWS Network Firewall stateless rule group, used in a NetworkFirewallPolicyDescription.

" + }, + "StatelessRuleGroupList":{ + "type":"list", + "member":{"shape":"StatelessRuleGroup"} + }, + "StatelessRuleGroupPriority":{ + "type":"integer", + "max":65535, + "min":1 + }, "Tag":{ "type":"structure", "required":[ @@ -1926,7 +2120,11 @@ "RESOURCE_MISSING_SECURITY_GROUP", "RESOURCE_VIOLATES_AUDIT_SECURITY_GROUP", "SECURITY_GROUP_UNUSED", - "SECURITY_GROUP_REDUNDANT" + "SECURITY_GROUP_REDUNDANT", + "MISSING_FIREWALL", + "MISSING_FIREWALL_SUBNET_IN_AZ", + "MISSING_EXPECTED_ROUTE_TABLE", + "NETWORK_FIREWALL_POLICY_MODIFIED" ] }, "ViolationTarget":{ @@ -1936,5 +2134,5 @@ "pattern":".*" } }, - "documentation":"AWS Firewall Manager

This is the AWS Firewall Manager API Reference. This guide is for developers who need detailed information about the AWS Firewall Manager API actions, data types, and errors. For detailed information about AWS Firewall Manager features, see the AWS Firewall Manager Developer Guide.

" + "documentation":"AWS Firewall Manager

This is the AWS Firewall Manager API Reference. This guide is for developers who need detailed information about the AWS Firewall Manager API actions, data types, and errors. For detailed information about AWS Firewall Manager features, see the AWS Firewall Manager Developer Guide.

Some API actions require explicit resource permissions. For information, see the developer guide topic Firewall Manager required permissions for API actions.

" } diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index 5bf14c94b4ee..185a4cc9c2dd 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecast/src/main/resources/codegen-resources/paginators-1.json b/services/forecast/src/main/resources/codegen-resources/paginators-1.json index 57661322c1a0..da6de7d8f44d 100644 --- a/services/forecast/src/main/resources/codegen-resources/paginators-1.json +++ b/services/forecast/src/main/resources/codegen-resources/paginators-1.json @@ -30,6 +30,12 @@ "output_token": "NextToken", "result_key": "Forecasts" }, + "ListPredictorBacktestExportJobs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PredictorBacktestExportJobs" + }, "ListPredictors": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/forecast/src/main/resources/codegen-resources/service-2.json b/services/forecast/src/main/resources/codegen-resources/service-2.json index 9537a6b4fab9..cc234f8e3cb6 100644 --- a/services/forecast/src/main/resources/codegen-resources/service-2.json +++ b/services/forecast/src/main/resources/codegen-resources/service-2.json @@ -43,7 +43,7 @@ {"shape":"ResourceInUseException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.

After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your datasets groups, use the ListDatasetGroups operation.

The Status of a dataset group must be ACTIVE before you can create use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.

" + "documentation":"

Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.

After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your datasets groups, use the ListDatasetGroups operation.

The Status of a dataset group must be ACTIVE before you can use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.

" }, "CreateDatasetImportJob":{ "name":"CreateDatasetImportJob", @@ -111,7 +111,24 @@ {"shape":"ResourceInUseException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an Amazon Forecast predictor.

In the request, you provide a dataset group and either specify an algorithm or let Amazon Forecast choose the algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.

Amazon Forecast uses the chosen algorithm to train a model using the latest version of the datasets in the specified dataset group. The result is called a predictor. You then generate a forecast using the CreateForecast operation.

After training a model, the CreatePredictor operation also evaluates it. To see the evaluation metrics, use the GetAccuracyMetrics operation. Always review the evaluation metrics before deciding to use the predictor to generate a forecast.

Optionally, you can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES dataset to improve model training. For more information, see FeaturizationConfig.

For RELATED_TIME_SERIES datasets, CreatePredictor verifies that the DataFrequency specified when the dataset was created matches the ForecastFrequency. TARGET_TIME_SERIES datasets don't have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see howitworks-datasets-groups.

AutoML

If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the objective function, set PerformAutoML to true. The objective function is defined as the mean of the weighted p10, p50, and p90 quantile losses. For more information, see EvaluationResult.

When AutoML is enabled, the following properties are disallowed:

  • AlgorithmArn

  • HPOConfig

  • PerformHPO

  • TrainingParameters

To get a list of all of your predictors, use the ListPredictors operation.

Before you can use the predictor to create a forecast, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.

" + "documentation":"

Creates an Amazon Forecast predictor.

In the request, provide a dataset group and either specify an algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If you specify an algorithm, you also can override algorithm-specific hyperparameters.

Amazon Forecast uses the algorithm to train a predictor using the latest version of the datasets in the specified dataset group. You can then generate a forecast using the CreateForecast operation.

To see the evaluation metrics, use the GetAccuracyMetrics operation.

You can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES dataset to improve model training. For more information, see FeaturizationConfig.

For RELATED_TIME_SERIES datasets, CreatePredictor verifies that the DataFrequency specified when the dataset was created matches the ForecastFrequency. TARGET_TIME_SERIES datasets don't have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see howitworks-datasets-groups.

By default, predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90) quantiles. You can choose custom forecast types to train and evaluate your predictor by setting the ForecastTypes.

AutoML

If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the objective function, set PerformAutoML to true. The objective function is defined as the mean of the weighted losses over the forecast types. By default, these are the p10, p50, and p90 quantile losses. For more information, see EvaluationResult.

When AutoML is enabled, the following properties are disallowed:

  • AlgorithmArn

  • HPOConfig

  • PerformHPO

  • TrainingParameters

To get a list of all of your predictors, use the ListPredictors operation.

Before you can use the predictor to create a forecast, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.
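A hedged SDK for Java v2 sketch of the AutoML path described above, with the default quantiles spelled out via ForecastTypes; the predictor name, dataset group ARN, and forecast frequency are hypothetical.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.CreatePredictorRequest;
    import software.amazon.awssdk.services.forecast.model.FeaturizationConfig;
    import software.amazon.awssdk.services.forecast.model.InputDataConfig;

    public class CreatePredictorSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            forecast.createPredictor(CreatePredictorRequest.builder()
                    .predictorName("demand_predictor")                  // hypothetical name
                    .forecastHorizon(14)
                    .forecastTypes("0.1", "0.5", "0.9")                 // the default quantiles, made explicit
                    .performAutoML(true)                                // so AlgorithmArn and HPO settings are omitted
                    .inputDataConfig(InputDataConfig.builder()
                            .datasetGroupArn("arn:aws:forecast:us-east-1:123456789012:dataset-group/demand") // hypothetical
                            .build())
                    .featurizationConfig(FeaturizationConfig.builder()
                            .forecastFrequency("D")                     // daily data, hypothetical
                            .build())
                    .build());
        }
    }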

" + }, + "CreatePredictorBacktestExportJob":{ + "name":"CreatePredictorBacktestExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePredictorBacktestExportJobRequest"}, + "output":{"shape":"CreatePredictorBacktestExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Exports backtest forecasts and accuracy metrics generated by the CreatePredictor operation. Two CSV files are exported to a specified S3 bucket.

You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
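For example, a minimal SDK for Java v2 sketch might point the export at an S3 prefix through a DataDestination with an S3Config; the job name, predictor ARN, bucket path, and role ARN are all hypothetical.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.CreatePredictorBacktestExportJobRequest;
    import software.amazon.awssdk.services.forecast.model.DataDestination;
    import software.amazon.awssdk.services.forecast.model.S3Config;

    public class BacktestExportSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            forecast.createPredictorBacktestExportJob(CreatePredictorBacktestExportJobRequest.builder()
                    .predictorBacktestExportJobName("demand_backtest_export")                               // hypothetical
                    .predictorArn("arn:aws:forecast:us-east-1:123456789012:predictor/demand_predictor")     // hypothetical
                    .destination(DataDestination.builder()
                            .s3Config(S3Config.builder()
                                    .path("s3://my-forecast-exports/backtests/")                            // hypothetical bucket
                                    .roleArn("arn:aws:iam::123456789012:role/ForecastS3AccessRole")         // hypothetical role
                                    .build())
                            .build())
                    .build());
        }
    }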

" }, "DeleteDataset":{ "name":"DeleteDataset", @@ -203,6 +220,21 @@ "documentation":"

Deletes a predictor created using the CreatePredictor operation. You can delete only predictors that have a status of ACTIVE or CREATE_FAILED. To get the status, use the DescribePredictor operation.

", "idempotent":true }, + "DeletePredictorBacktestExportJob":{ + "name":"DeletePredictorBacktestExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePredictorBacktestExportJobRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ], + "documentation":"

Deletes a predictor backtest export job.

", + "idempotent":true + }, "DescribeDataset":{ "name":"DescribeDataset", "http":{ @@ -293,6 +325,21 @@ "documentation":"

Describes a predictor created using the CreatePredictor operation.

In addition to listing the properties provided in the CreatePredictor request, this operation lists the following properties:

  • DatasetImportJobArns - The dataset import jobs used to import training data.

  • AutoMLAlgorithmArns - If AutoML is performed, the algorithms that were evaluated.

  • CreationTime

  • LastModificationTime

  • Status

  • Message - If an error occurred, information about the error.

", "idempotent":true }, + "DescribePredictorBacktestExportJob":{ + "name":"DescribePredictorBacktestExportJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePredictorBacktestExportJobRequest"}, + "output":{"shape":"DescribePredictorBacktestExportJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a predictor backtest export job created using the CreatePredictorBacktestExportJob operation.

In addition to listing the properties provided by the user in the CreatePredictorBacktestExportJob request, this operation lists the following properties:

  • CreationTime

  • LastModificationTime

  • Status

  • Message (if an error occurred)

", + "idempotent":true + }, "GetAccuracyMetrics":{ "name":"GetAccuracyMetrics", "http":{ @@ -306,7 +353,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Provides metrics on the accuracy of the models that were trained by the CreatePredictor operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see metrics.

This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (NumberOfBacktestWindows) is specified using the EvaluationParameters object, which is optionally included in the CreatePredictor request. If NumberOfBacktestWindows isn't specified, the number defaults to one.

The parameters of the filling method determine which items contribute to the metrics. If you want all items to contribute, specify zero. If you want only those items that have complete data in the range being evaluated to contribute, specify nan. For more information, see FeaturizationMethod.

Before you can get accuracy metrics, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.

", + "documentation":"

Provides metrics on the accuracy of the models that were trained by the CreatePredictor operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see Predictor Metrics.

This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (NumberOfBacktestWindows) is specified using the EvaluationParameters object, which is optionally included in the CreatePredictor request. If NumberOfBacktestWindows isn't specified, the number defaults to one.

The parameters of the filling method determine which items contribute to the metrics. If you want all items to contribute, specify zero. If you want only those items that have complete data in the range being evaluated to contribute, specify nan. For more information, see FeaturizationMethod.

Before you can get accuracy metrics, the Status of the predictor must be ACTIVE, signifying that training has completed. To get the status, use the DescribePredictor operation.

", "idempotent":true }, "ListDatasetGroups":{ @@ -382,6 +429,21 @@ "documentation":"

Returns a list of forecasts created using the CreateForecast operation. For each forecast, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, specify the ARN with the DescribeForecast operation. You can filter the list using an array of Filter objects.

", "idempotent":true }, + "ListPredictorBacktestExportJobs":{ + "name":"ListPredictorBacktestExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPredictorBacktestExportJobsRequest"}, + "output":{"shape":"ListPredictorBacktestExportJobsResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Returns a list of predictor backtest export jobs created using the CreatePredictorBacktestExportJob operation. This operation returns a summary for each backtest export job. You can filter the list using an array of Filter objects.

To retrieve the complete set of properties for a particular backtest export job, use the ARN with the DescribePredictorBacktestExportJob operation.
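Because the operation is paginated (see the paginators-1.json entry above), the generated client is expected to expose a paginator; a hedged sketch of listing every export job's ARN follows, assuming the usual listPredictorBacktestExportJobsPaginator naming.

    import software.amazon.awssdk.services.forecast.ForecastClient;
    import software.amazon.awssdk.services.forecast.model.ListPredictorBacktestExportJobsRequest;

    public class ListBacktestExportsSketch {
        public static void main(String[] args) {
            ForecastClient forecast = ForecastClient.create();

            // The paginator follows NextToken automatically, MaxResults items per page.
            forecast.listPredictorBacktestExportJobsPaginator(
                            ListPredictorBacktestExportJobsRequest.builder().maxResults(25).build())
                    .stream()
                    .flatMap(page -> page.predictorBacktestExportJobs().stream())
                    .forEach(job -> System.out.println(job.predictorBacktestExportJobArn()));
        }
    }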

", + "idempotent":true + }, "ListPredictors":{ "name":"ListPredictors", "http":{ @@ -724,6 +786,38 @@ } } }, + "CreatePredictorBacktestExportJobRequest":{ + "type":"structure", + "required":[ + "PredictorBacktestExportJobName", + "PredictorArn", + "Destination" + ], + "members":{ + "PredictorBacktestExportJobName":{ + "shape":"Name", + "documentation":"

The name for the backtest export job.

" + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor that you want to export.

" + }, + "Destination":{"shape":"DataDestination"}, + "Tags":{ + "shape":"Tags", + "documentation":"

Optional metadata to help you categorize and organize your backtests. Each tag consists of a key and an optional value, both of which you define. Tag keys and values are case sensitive.

The following restrictions apply to tags:

  • For each resource, each tag key must be unique and each tag key must have one value.

  • Maximum number of tags per resource: 50.

  • Maximum key length: 128 Unicode characters in UTF-8.

  • Maximum value length: 256 Unicode characters in UTF-8.

  • Accepted characters: all letters and numbers, spaces representable in UTF-8, and + - = . _ : / @. If your tagging schema is used across other services and resources, the character restrictions of those services also apply.

  • Key prefixes cannot include any upper or lowercase combination of aws: or AWS:. Values can have this prefix. If a tag value has aws as its prefix but the key does not, Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit. You cannot edit or delete tag keys with this prefix.

" + } + } + }, + "CreatePredictorBacktestExportJobResponse":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor backtest export job.

" + } + } + }, "CreatePredictorRequest":{ "type":"structure", "required":[ @@ -739,19 +833,23 @@ }, "AlgorithmArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the algorithm to use for model training. Required if PerformAutoML is not set to true.

Supported algorithms:

  • arn:aws:forecast:::algorithm/ARIMA

  • arn:aws:forecast:::algorithm/Deep_AR_Plus

    Supports hyperparameter optimization (HPO)

  • arn:aws:forecast:::algorithm/ETS

  • arn:aws:forecast:::algorithm/NPTS

  • arn:aws:forecast:::algorithm/Prophet

" + "documentation":"

The Amazon Resource Name (ARN) of the algorithm to use for model training. Required if PerformAutoML is not set to true.

Supported algorithms:

  • arn:aws:forecast:::algorithm/ARIMA

  • arn:aws:forecast:::algorithm/CNN-QR

  • arn:aws:forecast:::algorithm/Deep_AR_Plus

  • arn:aws:forecast:::algorithm/ETS

  • arn:aws:forecast:::algorithm/NPTS

  • arn:aws:forecast:::algorithm/Prophet

" }, "ForecastHorizon":{ "shape":"Integer", "documentation":"

Specifies the number of time-steps that the model is trained to predict. The forecast horizon is also called the prediction length.

For example, if you configure a dataset for daily data collection (using the DataFrequency parameter of the CreateDataset operation) and set the forecast horizon to 10, the model returns predictions for 10 days.

The maximum forecast horizon is the lesser of 500 time-steps or 1/3 of the TARGET_TIME_SERIES dataset length.

" }, + "ForecastTypes":{ + "shape":"ForecastTypes", + "documentation":"

Specifies the forecast types used to train a predictor. You can specify up to five forecast types. Forecast types can be quantiles from 0.01 to 0.99, by increments of 0.01 or higher. You can also specify the mean forecast with mean.

The default value is [\"0.10\", \"0.50\", \"0.9\"].

" + }, "PerformAutoML":{ "shape":"Boolean", "documentation":"

Whether to perform AutoML. When Amazon Forecast performs AutoML, it evaluates the algorithms it provides and chooses the best algorithm and configuration for your training dataset.

The default value is false. In this case, you are required to specify an algorithm.

Set PerformAutoML to true to have Amazon Forecast perform AutoML. This is a good option if you aren't sure which algorithm is suitable for your training data. In this case, PerformHPO must be false.

" }, "PerformHPO":{ "shape":"Boolean", - "documentation":"

Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as running a hyperparameter tuning job.

The default value is false. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.

To override the default values, set PerformHPO to true and, optionally, supply the HyperParameterTuningJobConfig object. The tuning job specifies a metric to optimize, which hyperparameters participate in tuning, and the valid range for each tunable hyperparameter. In this case, you are required to specify an algorithm and PerformAutoML must be false.

The following algorithm supports HPO:

  • DeepAR+

" + "documentation":"

Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as running a hyperparameter tuning job.

The default value is false. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.

To override the default values, set PerformHPO to true and, optionally, supply the HyperParameterTuningJobConfig object. The tuning job specifies a metric to optimize, which hyperparameters participate in tuning, and the valid range for each tunable hyperparameter. In this case, you are required to specify an algorithm and PerformAutoML must be false.

The following algorithms support HPO:

  • DeepAR+

  • CNN-QR

" }, "TrainingParameters":{ "shape":"TrainingParameters", @@ -801,7 +899,7 @@ "documentation":"

The path to an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the bucket.

" } }, - "documentation":"

The destination for an exported forecast, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the location and, optionally, an AWS Key Management Service (KMS) key. This object is submitted in the CreateForecastExportJob request.

" + "documentation":"

The destination for an export job, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the location and, optionally, an AWS Key Management Service (KMS) key.

" }, "DataSource":{ "type":"structure", @@ -970,6 +1068,16 @@ } } }, + "DeletePredictorBacktestExportJobRequest":{ + "type":"structure", + "required":["PredictorBacktestExportJobArn"], + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor backtest export job to delete.

" + } + } + }, "DeletePredictorRequest":{ "type":"structure", "required":["PredictorArn"], @@ -1235,6 +1343,50 @@ } } }, + "DescribePredictorBacktestExportJobRequest":{ + "type":"structure", + "required":["PredictorBacktestExportJobArn"], + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor backtest export job.

" + } + } + }, + "DescribePredictorBacktestExportJobResponse":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor backtest export job.

" + }, + "PredictorBacktestExportJobName":{ + "shape":"Name", + "documentation":"

The name of the predictor backtest export job.

" + }, + "PredictorArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor.

" + }, + "Destination":{"shape":"DataDestination"}, + "Message":{ + "shape":"Message", + "documentation":"

Information about any errors that may have occurred during the backtest export.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the predictor backtest export job. States include:

  • ACTIVE

  • CREATE_PENDING

  • CREATE_IN_PROGRESS

  • CREATE_FAILED

  • DELETE_PENDING

  • DELETE_IN_PROGRESS

  • DELETE_FAILED

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the predictor backtest export job was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the last successful export job finished.

" + } + } + }, "DescribePredictorRequest":{ "type":"structure", "required":["PredictorArn"], @@ -1264,6 +1416,10 @@ "shape":"Integer", "documentation":"

The number of time-steps of the forecast. The forecast horizon is also called the prediction length.

" }, + "ForecastTypes":{ + "shape":"ForecastTypes", + "documentation":"

The forecast types used during predictor training. Default value is [\"0.1\",\"0.5\",\"0.9\"]

" + }, "PerformAutoML":{ "shape":"Boolean", "documentation":"

Whether the predictor is set to perform AutoML.

" @@ -1274,7 +1430,7 @@ }, "TrainingParameters":{ "shape":"TrainingParameters", - "documentation":"

The default training parameters or overrides selected during model training. If using the AutoML algorithm or if HPO is turned on while using the DeepAR+ algorithms, the optimized values for the chosen hyperparameters are returned. For more information, see aws-forecast-choosing-recipes.

" + "documentation":"

The default training parameters or overrides selected during model training. When running AutoML or choosing HPO with CNN-QR or DeepAR+, the optimized values for the chosen hyperparameters are returned. For more information, see aws-forecast-choosing-recipes.

" }, "EvaluationParameters":{ "shape":"EvaluationParameters", @@ -1358,6 +1514,28 @@ "documentation":"

An AWS Key Management Service (KMS) key and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key. You can specify this optional object in the CreateDataset and CreatePredictor requests.

" }, "ErrorMessage":{"type":"string"}, + "ErrorMetric":{ + "type":"structure", + "members":{ + "ForecastType":{ + "shape":"ForecastType", + "documentation":"

The Forecast type used to compute WAPE and RMSE.

" + }, + "WAPE":{ + "shape":"Double", + "documentation":"

The weighted absolute percentage error (WAPE).

" + }, + "RMSE":{ + "shape":"Double", + "documentation":"

The root-mean-square error (RMSE).

" + } + }, + "documentation":"

Provides detailed error metrics to evaluate the performance of a predictor. This object is part of the Metrics object.

" + }, + "ErrorMetrics":{ + "type":"list", + "member":{"shape":"ErrorMetric"} + }, "EvaluationParameters":{ "type":"structure", "members":{ @@ -1437,7 +1615,7 @@ }, "FeaturizationMethodParameters":{ "shape":"FeaturizationMethodParameters", - "documentation":"

The method parameters (key-value pairs), which are a map of override parameters. Specify these parameters to override the default values. Related Time Series attributes do not accept aggregation parameters.

The following list shows the parameters and their valid values for the \"filling\" featurization method for a Target Time Series dataset. Bold signifies the default value.

  • aggregation: sum, avg, first, min, max

  • frontfill: none

  • middlefill: zero, nan (not a number), value, median, mean, min, max

  • backfill: zero, nan, value, median, mean, min, max

The following list shows the parameters and their valid values for a Related Time Series featurization method (there are no defaults):

  • middlefill: zero, value, median, mean, min, max

  • backfill: zero, value, median, mean, min, max

  • futurefill: zero, value, median, mean, min, max

" + "documentation":"

The method parameters (key-value pairs), which are a map of override parameters. Specify these parameters to override the default values. Related Time Series attributes do not accept aggregation parameters.

The following list shows the parameters and their valid values for the \"filling\" featurization method for a Target Time Series dataset. Bold signifies the default value.

  • aggregation: sum, avg, first, min, max

  • frontfill: none

  • middlefill: zero, nan (not a number), value, median, mean, min, max

  • backfill: zero, nan, value, median, mean, min, max

The following list shows the parameters and their valid values for a Related Time Series featurization method (there are no defaults):

  • middlefill: zero, value, median, mean, min, max

  • backfill: zero, value, median, mean, min, max

  • futurefill: zero, value, median, mean, min, max

To set a filling method to a specific value, set the fill parameter to value and define the value in a corresponding _value parameter. For example, to set backfilling to a value of 2, include the following: \"backfill\": \"value\" and \"backfill_value\":\"2\".

" } }, "documentation":"

Provides information about the method that featurizes (transforms) a dataset field. The method is part of the FeaturizationPipeline of the Featurization object.

The following is an example of how you specify a FeaturizationMethod object.

{

\"FeaturizationMethodName\": \"filling\",

\"FeaturizationMethodParameters\": {\"aggregation\": \"sum\", \"middlefill\": \"zero\", \"backfill\": \"zero\"}

}

" @@ -1853,6 +2031,36 @@ } } }, + "ListPredictorBacktestExportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request was truncated, the response includes a NextToken. To retrieve the next set of results, use the token in the next request. Tokens expire after 24 hours.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The number of items to return in the response.

" + }, + "Filters":{ + "shape":"Filters", + "documentation":"

An array of filters. For each filter, provide a condition and a match statement. The condition is either IS or IS_NOT, which specifies whether to include or exclude the predictor backtest export jobs that match the statement from the list. The match statement consists of a key and a value.

Filter properties

  • Condition - The condition to apply. Valid values are IS and IS_NOT. To include the predictor backtest export jobs that match the statement, specify IS. To exclude matching predictor backtest export jobs, specify IS_NOT.

  • Key - The name of the parameter to filter on. Valid values are PredictorBacktestExportJobArn and Status.

  • Value - The value to match.

" + } + } + }, + "ListPredictorBacktestExportJobsResponse":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobs":{ + "shape":"PredictorBacktestExportJobs", + "documentation":"

An array of objects that summarize the properties of each predictor backtest export job.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Returns this token if the response is truncated. To retrieve the next set of results, use the token in the next request.

" + } + } + }, "ListPredictorsRequest":{ "type":"structure", "members":{ @@ -1913,11 +2121,17 @@ "members":{ "RMSE":{ "shape":"Double", - "documentation":"

The root mean square error (RMSE).

" + "documentation":"

The root-mean-square error (RMSE).

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, please refer to ErrorMetrics for both RMSE and WAPE" }, "WeightedQuantileLosses":{ "shape":"WeightedQuantileLosses", "documentation":"

An array of weighted quantile losses. Quantiles divide a probability distribution into regions of equal probability. The distribution in this case is the loss function.

" + }, + "ErrorMetrics":{ + "shape":"ErrorMetrics", + "documentation":"

Provides detailed error metrics on forecast type, root-mean-square error (RMSE), and weighted absolute percentage error (WAPE).

" } }, "documentation":"

Provides metrics that are used to evaluate the performance of a predictor. This object is part of the WindowSummary object.

" @@ -1961,6 +2175,41 @@ "max":256, "pattern":"^[a-zA-Z0-9\\-\\_\\.\\/\\[\\]\\,\\\"\\\\\\s]+$" }, + "PredictorBacktestExportJobSummary":{ + "type":"structure", + "members":{ + "PredictorBacktestExportJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the predictor backtest export job.

" + }, + "PredictorBacktestExportJobName":{ + "shape":"Name", + "documentation":"

The name of the predictor backtest export job.

" + }, + "Destination":{"shape":"DataDestination"}, + "Status":{ + "shape":"Status", + "documentation":"

The status of the predictor backtest export job. States include:

  • ACTIVE

  • CREATE_PENDING

  • CREATE_IN_PROGRESS

  • CREATE_FAILED

  • DELETE_PENDING

  • DELETE_IN_PROGRESS

  • DELETE_FAILED

" + }, + "Message":{ + "shape":"ErrorMessage", + "documentation":"

Information about any errors that may have occurred during the backtest export.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the predictor backtest export job was created.

" + }, + "LastModificationTime":{ + "shape":"Timestamp", + "documentation":"

When the last successful export job finished.

" + } + }, + "documentation":"

Provides a summary of the predictor backtest export job properties used in the ListPredictorBacktestExportJobs operation. To get a complete set of properties, call the DescribePredictorBacktestExportJob operation, and provide the listed PredictorBacktestExportJobArn.

" + }, + "PredictorBacktestExportJobs":{ + "type":"list", + "member":{"shape":"PredictorBacktestExportJobSummary"} + }, "PredictorEvaluationResults":{ "type":"list", "member":{"shape":"EvaluationResult"} @@ -2077,7 +2326,7 @@ "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service (KMS) key.

" } }, - "documentation":"

The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is part of the DataSource object that is submitted in the CreateDatasetImportJob request, and part of the DataDestination object that is submitted in the CreateForecastExportJob request.

" + "documentation":"

The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is part of the DataSource object that is submitted in the CreateDatasetImportJob request, and part of the DataDestination object.

" }, "S3Path":{ "type":"string", @@ -2182,10 +2431,10 @@ }, "Value":{ "shape":"Value", - "documentation":"

One of the following 2 letter country codes:

  • \"AR\" - ARGENTINA

  • \"AT\" - AUSTRIA

  • \"AU\" - AUSTRALIA

  • \"BE\" - BELGIUM

  • \"BR\" - BRAZIL

  • \"CA\" - CANADA

  • \"CN\" - CHINA

  • \"CZ\" - CZECH REPUBLIC

  • \"DK\" - DENMARK

  • \"EC\" - ECUADOR

  • \"FI\" - FINLAND

  • \"FR\" - FRANCE

  • \"DE\" - GERMANY

  • \"HU\" - HUNGARY

  • \"IE\" - IRELAND

  • \"IN\" - INDIA

  • \"IT\" - ITALY

  • \"JP\" - JAPAN

  • \"KR\" - KOREA

  • \"LU\" - LUXEMBOURG

  • \"MX\" - MEXICO

  • \"NL\" - NETHERLANDS

  • \"NO\" - NORWAY

  • \"PL\" - POLAND

  • \"PT\" - PORTUGAL

  • \"RU\" - RUSSIA

  • \"ZA\" - SOUTH AFRICA

  • \"ES\" - SPAIN

  • \"SE\" - SWEDEN

  • \"CH\" - SWITZERLAND

  • \"US\" - UNITED STATES

  • \"UK\" - UNITED KINGDOM

" + "documentation":"

One of the following 2 letter country codes:

  • \"AL\" - ALBANIA

  • \"AR\" - ARGENTINA

  • \"AT\" - AUSTRIA

  • \"AU\" - AUSTRALIA

  • \"BA\" - BOSNIA HERZEGOVINA

  • \"BE\" - BELGIUM

  • \"BG\" - BULGARIA

  • \"BO\" - BOLIVIA

  • \"BR\" - BRAZIL

  • \"BY\" - BELARUS

  • \"CA\" - CANADA

  • \"CL\" - CHILE

  • \"CO\" - COLOMBIA

  • \"CR\" - COSTA RICA

  • \"HR\" - CROATIA

  • \"CZ\" - CZECH REPUBLIC

  • \"DK\" - DENMARK

  • \"EC\" - ECUADOR

  • \"EE\" - ESTONIA

  • \"ET\" - ETHIOPIA

  • \"FI\" - FINLAND

  • \"FR\" - FRANCE

  • \"DE\" - GERMANY

  • \"GR\" - GREECE

  • \"HU\" - HUNGARY

  • \"IS\" - ICELAND

  • \"IN\" - INDIA

  • \"IE\" - IRELAND

  • \"IT\" - ITALY

  • \"JP\" - JAPAN

  • \"KZ\" - KAZAKHSTAN

  • \"KR\" - KOREA

  • \"LV\" - LATVIA

  • \"LI\" - LIECHTENSTEIN

  • \"LT\" - LITHUANIA

  • \"LU\" - LUXEMBOURG

  • \"MK\" - MACEDONIA

  • \"MT\" - MALTA

  • \"MX\" - MEXICO

  • \"MD\" - MOLDOVA

  • \"ME\" - MONTENEGRO

  • \"NL\" - NETHERLANDS

  • \"NZ\" - NEW ZEALAND

  • \"NI\" - NICARAGUA

  • \"NG\" - NIGERIA

  • \"NO\" - NORWAY

  • \"PA\" - PANAMA

  • \"PY\" - PARAGUAY

  • \"PE\" - PERU

  • \"PL\" - POLAND

  • \"PT\" - PORTUGAL

  • \"RO\" - ROMANIA

  • \"RU\" - RUSSIA

  • \"RS\" - SERBIA

  • \"SK\" - SLOVAKIA

  • \"SI\" - SLOVENIA

  • \"ZA\" - SOUTH AFRICA

  • \"ES\" - SPAIN

  • \"SE\" - SWEDEN

  • \"CH\" - SWITZERLAND

  • \"UA\" - UKRAINE

  • \"AE\" - UNITED ARAB EMIRATES

  • \"US\" - UNITED STATES

  • \"UK\" - UNITED KINGDOM

  • \"UY\" - URUGUAY

  • \"VE\" - VENEZUELA

" } }, - "documentation":"

Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object.

The only supported feature is a holiday calendar. If you use the calendar, all data in the datasets should belong to the same country as the calendar. For the holiday calendar data, see the Jollyday web site.

India and Korea's holidays are not included in the Jollyday library, but both are supported by Amazon Forecast. Their holidays are:

\"IN\" - INDIA

  • JANUARY 26 - REPUBLIC DAY

  • AUGUST 15 - INDEPENDENCE DAY

  • OCTOBER 2 GANDHI'S BIRTHDAY

\"KR\" - KOREA

  • JANUARY 1 - NEW YEAR

  • MARCH 1 - INDEPENDENCE MOVEMENT DAY

  • MAY 5 - CHILDREN'S DAY

  • JUNE 6 - MEMORIAL DAY

  • AUGUST 15 - LIBERATION DAY

  • OCTOBER 3 - NATIONAL FOUNDATION DAY

  • OCTOBER 9 - HANGEUL DAY

  • DECEMBER 25 - CHRISTMAS DAY

" + "documentation":"

Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object.

The only supported feature is Holidays. If you use the calendar, all data in the datasets should belong to the same country as the calendar. For the holiday calendar data, see the Jollyday website.

" }, "SupplementaryFeatures":{ "type":"list", diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 1bfa79507fbc..61ffb0d741c6 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 2637799130d2..f3bff36ba45d 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/frauddetector/src/main/resources/codegen-resources/service-2.json b/services/frauddetector/src/main/resources/codegen-resources/service-2.json index c7afce8be30f..f7dc9352b732 100644 --- a/services/frauddetector/src/main/resources/codegen-resources/service-2.json +++ b/services/frauddetector/src/main/resources/codegen-resources/service-2.json @@ -139,7 +139,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Deletes the detector. Before deleting a detector, you must first delete all detector versions and rule versions associated with the detector.

" + "documentation":"

Deletes the detector. Before deleting a detector, you must first delete all detector versions and rule versions associated with the detector.

When you delete a detector, Amazon Fraud Detector permanently deletes the detector and the data is no longer stored in Amazon Fraud Detector.

" }, "DeleteDetectorVersion":{ "name":"DeleteDetectorVersion", @@ -157,7 +157,23 @@ {"shape":"ConflictException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Deletes the detector version. You cannot delete detector versions that are in ACTIVE status.

" + "documentation":"

Deletes the detector version. You cannot delete detector versions that are in ACTIVE status.

When you delete a detector version, Amazon Fraud Detector permanently deletes the detector version and the data is no longer stored in Amazon Fraud Detector.

" + }, + "DeleteEntityType":{ + "name":"DeleteEntityType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEntityTypeRequest"}, + "output":{"shape":"DeleteEntityTypeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an entity type.

You cannot delete an entity type that is included in an event type.

When you delete an entity type, Amazon Fraud Detector permanently deletes that entity type from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" }, "DeleteEvent":{ "name":"DeleteEvent", @@ -170,9 +186,107 @@ "errors":[ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the specified event.

When you delete an event, Amazon Fraud Detector permanently deletes that event from the evaluation history, and the event data is no longer stored in Amazon Fraud Detector.

" + }, + "DeleteEventType":{ + "name":"DeleteEventType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEventTypeRequest"}, + "output":{"shape":"DeleteEventTypeResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an event type.

You cannot delete an event type that is used in a detector or a model.

When you delete an event type, Amazon Fraud Detector permanently deletes that event type from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" + }, + "DeleteExternalModel":{ + "name":"DeleteExternalModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteExternalModelRequest"}, + "output":{"shape":"DeleteExternalModelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Removes a SageMaker model from Amazon Fraud Detector.

You can remove an Amazon SageMaker model if it is not associated with a detector version. Removing a SageMaker model disconnects it from Amazon Fraud Detector, but the model remains available in SageMaker.

" + }, + "DeleteLabel":{ + "name":"DeleteLabel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLabelRequest"}, + "output":{"shape":"DeleteLabelResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a label.

You cannot delete labels that are included in an event type in Amazon Fraud Detector.

You cannot delete a label assigned to an event ID. You must first delete the relevant event ID.

When you delete a label, Amazon Fraud Detector permanently deletes that label from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelRequest"}, + "output":{"shape":"DeleteModelResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes a model.

You can delete models and model versions in Amazon Fraud Detector, provided that they are not associated with a detector version.

When you delete a model, Amazon Fraud Detector permanently deletes that model from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" + }, + "DeleteModelVersion":{ + "name":"DeleteModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelVersionRequest"}, + "output":{"shape":"DeleteModelVersionResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes a model version.

You can delete models and model versions in Amazon Fraud Detector, provided that they are not associated with a detector version.

When you delete a model version, Amazon Fraud Detector permanently deletes that model version from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" + }, + "DeleteOutcome":{ + "name":"DeleteOutcome", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOutcomeRequest"}, + "output":{"shape":"DeleteOutcomeResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Deletes the specified event.

" + "documentation":"

Deletes an outcome.

You cannot delete an outcome that is used in a rule version.

When you delete an outcome, Amazon Fraud Detector permanently deletes that outcome from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" }, "DeleteRule":{ "name":"DeleteRule", @@ -189,7 +303,24 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Deletes the rule. You cannot delete a rule if it is used by an ACTIVE or INACTIVE detector version.

" + "documentation":"

Deletes the rule. You cannot delete a rule if it is used by an ACTIVE or INACTIVE detector version.

When you delete a rule, Amazon Fraud Detector permanently deletes that rule from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" + }, + "DeleteVariable":{ + "name":"DeleteVariable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVariableRequest"}, + "output":{"shape":"DeleteVariableResult"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes a variable.

You can't delete variables that are included in an event type in Amazon Fraud Detector.

Amazon Fraud Detector automatically deletes model output variables and SageMaker model output variables when you delete the model. You can't delete these variables manually.

When you delete a variable, Amazon Fraud Detector permanently deletes that variable from the evaluation history, and the data is no longer stored in Amazon Fraud Detector.

" }, "DescribeDetector":{ "name":"DescribeDetector", @@ -287,7 +418,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} ], "documentation":"

Evaluates an event against a detector version. If a version ID is not provided, the detector’s (ACTIVE) version is used.

" }, @@ -1161,6 +1293,21 @@ "members":{ } }, + "DeleteEntityTypeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

The name of the entity type to delete.

" + } + } + }, + "DeleteEntityTypeResult":{ + "type":"structure", + "members":{ + } + }, "DeleteEventRequest":{ "type":"structure", "required":[ @@ -1169,11 +1316,11 @@ ], "members":{ "eventId":{ - "shape":"string", + "shape":"identifier", "documentation":"

The ID of the event to delete.

" }, "eventTypeName":{ - "shape":"string", + "shape":"identifier", "documentation":"

The name of the event type.

" } } @@ -1183,6 +1330,115 @@ "members":{ } }, + "DeleteEventTypeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

The name of the event type to delete.

" + } + } + }, + "DeleteEventTypeResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteExternalModelRequest":{ + "type":"structure", + "required":["modelEndpoint"], + "members":{ + "modelEndpoint":{ + "shape":"sageMakerEndpointIdentifier", + "documentation":"

The endpoint of the Amazon SageMaker model to delete.

" + } + } + }, + "DeleteExternalModelResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteLabelRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

The name of the label to delete.

" + } + } + }, + "DeleteLabelResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

The model ID of the model to delete.

" + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

The model type of the model to delete.

" + } + } + }, + "DeleteModelResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteModelVersionRequest":{ + "type":"structure", + "required":[ + "modelId", + "modelType", + "modelVersionNumber" + ], + "members":{ + "modelId":{ + "shape":"modelIdentifier", + "documentation":"

The model ID of the model version to delete.

" + }, + "modelType":{ + "shape":"ModelTypeEnum", + "documentation":"

The model type of the model version to delete.

" + }, + "modelVersionNumber":{ + "shape":"floatVersionString", + "documentation":"

The model version number of the model version to delete.

" + } + } + }, + "DeleteModelVersionResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteOutcomeRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"identifier", + "documentation":"

The name of the outcome to delete.

" + } + } + }, + "DeleteOutcomeResult":{ + "type":"structure", + "members":{ + } + }, "DeleteRuleRequest":{ "type":"structure", "required":["rule"], @@ -1195,6 +1451,21 @@ "members":{ } }, + "DeleteVariableRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"string", + "documentation":"

The name of the variable to delete.

" + } + } + }, + "DeleteVariableResult":{ + "type":"structure", + "members":{ + } + }, "DescribeDetectorRequest":{ "type":"structure", "required":["detectorId"], @@ -1894,7 +2165,7 @@ }, "status":{ "shape":"string", - "documentation":"

The model version status.

" + "documentation":"

The model version status.

Possible values are:

  • TRAINING_IN_PROGRESS

  • TRAINING_COMPLETE

  • ACTIVATE_REQUESTED

  • ACTIVATE_IN_PROGRESS

  • ACTIVE

  • INACTIVATE_REQUESTED

  • INACTIVATE_IN_PROGRESS

  • INACTIVE

  • ERROR

" }, "arn":{ "shape":"fraudDetectorArn", @@ -2064,7 +2335,7 @@ "type":"string", "max":90, "min":7, - "pattern":"^\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}|DEFAULT|arn:[a-zA-Z0-9-]+:kms:[a-zA-Z0-9-]+:\\d{12}:key\\/\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}$" + "pattern":"^DEFAULT|arn:[a-zA-Z0-9-]+:kms:[a-zA-Z0-9-]+:\\d{12}:key\\/\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}$" }, "Label":{ "type":"structure", diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 747a0283e1d6..c618c13658e0 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/paginators-1.json b/services/fsx/src/main/resources/codegen-resources/paginators-1.json index 43df3a5f3a7a..000eb57d5e80 100644 --- a/services/fsx/src/main/resources/codegen-resources/paginators-1.json +++ b/services/fsx/src/main/resources/codegen-resources/paginators-1.json @@ -10,6 +10,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "DescribeFileSystemAliases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "DescribeFileSystems": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index 666b832558ef..60635b816a82 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,21 @@ "uid":"fsx-2018-03-01" }, "operations":{ + "AssociateFileSystemAliases":{ + "name":"AssociateFileSystemAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateFileSystemAliasesRequest"}, + "output":{"shape":"AssociateFileSystemAliasesResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Use this action to associate one or more Domain Name System (DNS) aliases with an existing Amazon FSx for Windows File Server file system. A file system can have a maximum of 50 DNS aliases associated with it at any one time. If you try to associate a DNS alias that is already associated with the file system, FSx takes no action on that alias in the request. For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.

The system response shows the DNS aliases that Amazon FSx is attempting to associate with the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating with the file system.

" + }, "CancelDataRepositoryTask":{ "name":"CancelDataRepositoryTask", "http":{ @@ -48,7 +63,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

  • a Persistent deployment type

  • is not linked to a data respository.

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Windows backups.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

  • Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the backup.

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", + "documentation":"

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

  • a Persistent deployment type

  • is not linked to a data repository.

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.

For more information about backing up Amazon FSx for Windows file systems, see Working with FSx for Windows backups.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup with the specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

  • Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the backup.

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", "idempotent":true }, "CreateDataRepositoryTask":{ @@ -68,7 +83,7 @@ {"shape":"InternalServerError"}, {"shape":"DataRepositoryTaskExecuting"} ], - "documentation":"

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Using Data Repository Tasks. To learn more about linking a data repository to your file system, see Setting the Export Prefix.

", + "documentation":"

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

", "idempotent":true }, "CreateFileSystem":{ @@ -183,6 +198,21 @@ ], "documentation":"

Returns the description of specific Amazon FSx for Lustre data repository tasks, if one or more TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all tasks, you can paginate the response by using the optional MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

" }, + "DescribeFileSystemAliases":{ + "name":"DescribeFileSystemAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFileSystemAliasesRequest"}, + "output":{"shape":"DescribeFileSystemAliasesResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns the DNS aliases that are associated with the specified Amazon FSx for Windows File Server file system. A history of all DNS aliases that have been associated with and disassociated from the file system is available in the list of AdministrativeAction provided in the DescribeFileSystems operation response.

" + }, "DescribeFileSystems":{ "name":"DescribeFileSystems", "http":{ @@ -198,6 +228,21 @@ ], "documentation":"

Returns the description of specific Amazon FSx file systems, if a FileSystemIds value is provided for that file system. Otherwise, it returns descriptions of all file systems owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all file system descriptions, you can optionally specify the MaxResults parameter to limit the number of descriptions in a response. If more file system descriptions remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your file system descriptions. DescribeFileSystems is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

  • The implementation might return fewer than MaxResults file system descriptions while still including a NextToken value.

  • The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multicall iteration is unspecified.

" }, + "DisassociateFileSystemAliases":{ + "name":"DisassociateFileSystemAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateFileSystemAliasesRequest"}, + "output":{"shape":"DisassociateFileSystemAliasesResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Use this action to disassociate, or remove, one or more Domain Name Service (DNS) aliases from an Amazon FSx for Windows File Server file system. If you attempt to disassociate a DNS alias that is not associated with the file system, Amazon FSx responds with a 400 Bad Request. For more information, see Working with DNS Aliases.

The system response shows the DNS aliases that Amazon FSx is attempting to disassociate from the file system. Use the API operation to monitor the status of the aliases Amazon FSx is disassociating from the file system.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -268,7 +313,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For Amazon FSx for Windows File Server file systems, you can update the following properties:

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • SelfManagedActiveDirectoryConfiguration

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

For Amazon FSx for Lustre file systems, you can update the following properties:

  • AutoImportPolicy

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • WeeklyMaintenanceStartTime

" + "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For Amazon FSx for Windows File Server file systems, you can update the following properties:

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • SelfManagedActiveDirectoryConfiguration

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

For Amazon FSx for Lustre file systems, you can update the following properties:

  • AutoImportPolicy

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • StorageCapacity

  • WeeklyMaintenanceStartTime

" } }, "shapes":{ @@ -332,7 +377,7 @@ "AdministrativeActionType":{"shape":"AdministrativeActionType"}, "ProgressPercent":{ "shape":"ProgressPercent", - "documentation":"

Provides the percent complete of a STORAGE_OPTIMIZATION administrative action.

" + "documentation":"

Provides the percent complete of a STORAGE_OPTIMIZATION administrative action. Does not apply to any other administrative action type.

" }, "RequestTime":{ "shape":"RequestTime", @@ -340,32 +385,34 @@ }, "Status":{ "shape":"Status", - "documentation":"

Describes the status of the administrative action, as follows:

  • FAILED - Amazon FSx failed to process the administrative action successfully.

  • IN_PROGRESS - Amazon FSx is processing the administrative action.

  • PENDING - Amazon FSx is waiting to process the administrative action.

  • COMPLETED - Amazon FSx has finished processing the administrative task.

  • UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage optimization process. For more information, see Managing Storage Capacity.

" + "documentation":"

Describes the status of the administrative action, as follows:

  • FAILED - Amazon FSx failed to process the administrative action successfully.

  • IN_PROGRESS - Amazon FSx is processing the administrative action.

  • PENDING - Amazon FSx is waiting to process the administrative action.

  • COMPLETED - Amazon FSx has finished processing the administrative task.

  • UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage optimization process. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.

" }, "TargetFileSystemValues":{ "shape":"FileSystem", - "documentation":"

Describes the target StorageCapacity or ThroughputCapacity value provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions.

" + "documentation":"

Describes the target value for the administration action, provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions.

" }, "FailureDetails":{"shape":"AdministrativeActionFailureDetails"} }, - "documentation":"

Describes a specific Amazon FSx Administrative Action for the current Windows file system.

" + "documentation":"

Describes a specific Amazon FSx administrative action for the current Windows or Lustre file system.

" }, "AdministrativeActionFailureDetails":{ "type":"structure", "members":{ "Message":{ "shape":"ErrorMessage", - "documentation":"

Error message providing details about the failure.

" + "documentation":"

Error message providing details about the failed administrative action.

" } }, "documentation":"

Provides information about a failed administrative action.

" }, "AdministrativeActionType":{ "type":"string", - "documentation":"

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system). A

  • STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION task starts. Storage optimization is the process of migrating the file system data to the new, larger disks. You can track the storage migration progress using the ProgressPercent property. When STORAGE_OPTIMIZATION completes successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing Storage Capacity.

", + "documentation":"

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION task starts.

    • For Windows, storage optimization is the process of migrating the file system data to the new, larger disks.

    • For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.

    You can track the storage optimization progress using the ProgressPercent property. When STORAGE_OPTIMIZATION completes successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.

  • FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new DNS alias with the file system.

  • FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system.

", "enum":[ "FILE_SYSTEM_UPDATE", - "STORAGE_OPTIMIZATION" + "STORAGE_OPTIMIZATION", + "FILE_SYSTEM_ALIAS_ASSOCIATION", + "FILE_SYSTEM_ALIAS_DISASSOCIATION" ] }, "AdministrativeActions":{ @@ -373,12 +420,85 @@ "member":{"shape":"AdministrativeAction"}, "max":50 }, + "Alias":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"AlternateDNSName", + "documentation":"

The name of the DNS alias. The alias name has to meet the following requirements:

  • Formatted as a fully-qualified domain name (FQDN), hostname.domain, for example, accounting.example.com.

  • Can contain alphanumeric characters and the hyphen (-).

  • Cannot start or end with a hyphen.

  • Can start with a numeric.

For DNS names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

" + }, + "Lifecycle":{ + "shape":"AliasLifecycle", + "documentation":"

Describes the state of the DNS alias.

  • AVAILABLE - The DNS alias is associated with an Amazon FSx file system.

  • CREATING - Amazon FSx is creating the DNS alias and associating it with the file system.

  • CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with the file system.

  • DELETING - Amazon FSx is disassociating the DNS alias from the file system and deleting it.

  • DELETE_FAILED - Amazon FSx was unable to disassociate the DNS alias from the file system.

" + } + }, + "documentation":"

A DNS alias that is associated with the file system. You can use a DNS alias to access a file system using user-defined DNS names, in addition to the default DNS name that Amazon FSx assigns to the file system. For more information, see DNS aliases in the FSx for Windows File Server User Guide.

" + }, + "AliasLifecycle":{ + "type":"string", + "enum":[ + "AVAILABLE", + "CREATING", + "DELETING", + "CREATE_FAILED", + "DELETE_FAILED" + ] + }, + "Aliases":{ + "type":"list", + "member":{"shape":"Alias"}, + "documentation":"

An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see DNS aliases.

", + "max":50 + }, + "AlternateDNSName":{ + "type":"string", + "max":253, + "min":4, + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,253}$" + }, + "AlternateDNSNames":{ + "type":"list", + "member":{"shape":"AlternateDNSName"}, + "max":50 + }, "ArchivePath":{ "type":"string", - "max":900, + "max":4357, "min":3, "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$" }, + "AssociateFileSystemAliasesRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "Aliases" + ], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Specifies the file system with which you want to associate one or more DNS aliases.

" + }, + "Aliases":{ + "shape":"AlternateDNSNames", + "documentation":"

An array of one or more DNS alias names to associate with the file system. The alias name has to comply with the following formatting requirements:

  • Formatted as a fully-qualified domain name (FQDN), hostname.domain, for example, accounting.corp.example.com.

  • Can contain alphanumeric characters and the hyphen (-).

  • Cannot start or end with a hyphen.

  • Can start with a numeric.

For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

" + } + }, + "documentation":"

The request object specifying one or more DNS alias names to associate with an Amazon FSx for Windows File Server file system.

" + }, + "AssociateFileSystemAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{ + "shape":"Aliases", + "documentation":"

An array of the DNS aliases that Amazon FSx is associating with the file system.

" + } + }, + "documentation":"

The system generated response showing the DNS aliases that Amazon FSx is attempting to associate with the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating with the file system. It can take up to 2.5 minutes for the alias status to change from CREATING to AVAILABLE.

" + }, "AutoImportPolicyType":{ "type":"string", "enum":[ @@ -409,7 +529,7 @@ }, "Lifecycle":{ "shape":"BackupLifecycle", - "documentation":"

The lifecycle status of the backup.

  • AVAILABLE - The backup is fully available.

  • CREATING - FSx is creating the backup.

  • TRANSFERRING - For Lustre file systems only; FSx is transferring the backup to S3.

  • DELETED - The backup was deleted is no longer available.

  • FAILED - Amazon FSx could not complete the backup.

" + "documentation":"

The lifecycle status of the backup.

  • AVAILABLE - The backup is fully available.

  • PENDING - For user-initiated backups on Lustre file systems only; Amazon FSx has not started creating the backup.

  • CREATING - Amazon FSx is creating the backup.

  • TRANSFERRING - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to S3.

  • DELETED - Amazon FSx deleted the backup and it is no longer available.

  • FAILED - Amazon FSx could not complete the backup.

" }, "FailureDetails":{ "shape":"BackupFailureDetails", @@ -445,7 +565,7 @@ "documentation":"

The configuration of the self-managed Microsoft Active Directory (AD) to which the Windows File Server instance is joined.

" } }, - "documentation":"

A backup of an Amazon FSx for file system.

" + "documentation":"

A backup of an Amazon FSx file system. For more information see:

" }, "BackupFailureDetails":{ "type":"structure", @@ -480,13 +600,14 @@ }, "BackupLifecycle":{ "type":"string", - "documentation":"

The lifecycle status of the backup.

  • AVAILABLE - The backup is fully available.

  • CREATING - FSx is creating the new user-intiated backup

  • TRANSFERRING - For user-initiated backups on Lustre file systems only; FSx is backing up the file system.

  • DELETED - The backup was deleted is no longer available.

  • FAILED - Amazon FSx could not complete the backup.

", + "documentation":"

The lifecycle status of the backup.

  • AVAILABLE - The backup is fully available.

  • PENDING - For user-initiated backups on Lustre file systems only; Amazon FSx has not started creating the backup.

  • CREATING - Amazon FSx is creating the new user-initiated backup.

  • TRANSFERRING - For user-initiated backups on Lustre file systems only; Amazon FSx is backing up the file system.

  • DELETED - Amazon FSx deleted the backup and it is no longer available.

  • FAILED - Amazon FSx could not complete the backup.

", "enum":[ "AVAILABLE", "CREATING", "TRANSFERRING", "DELETED", - "FAILED" + "FAILED", + "PENDING" ] }, "BackupNotFound":{ @@ -514,7 +635,8 @@ "documentation":"

The type of the backup.

", "enum":[ "AUTOMATIC", - "USER_INITIATED" + "USER_INITIATED", + "AWS_BACKUP" ] }, "Backups":{ @@ -835,6 +957,10 @@ "CopyTagsToBackups":{ "shape":"Flag", "documentation":"

A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

" + }, + "Aliases":{ + "shape":"AlternateDNSNames", + "documentation":"

An array of one or more DNS alias names that you want to associate with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload.

For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.

An alias name has to meet the following requirements:

  • Formatted as a fully-qualified domain name (FQDN), hostname.domain, for example, accounting.example.com.

  • Can contain alphanumeric characters and the hyphen (-).

  • Cannot start or end with a hyphen.

  • Can start with a numeric.

For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

" } }, "documentation":"

The configuration object for the Microsoft Windows file system used in CreateFileSystem and CreateFileSystemFromBackup operations.

" @@ -1253,6 +1379,43 @@ "NextToken":{"shape":"NextToken"} } }, + "DescribeFileSystemAliasesRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The ID of the file system to return the associated DNS aliases for (String).

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Maximum number of DNS aliases to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Opaque pagination token returned from a previous DescribeFileSystemAliases operation (String). If a token is included in the request, the action continues the list from where the previous call left off.

" + } + }, + "documentation":"

The request object for the DescribeFileSystemAliases operation.

" + }, + "DescribeFileSystemAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{ + "shape":"Aliases", + "documentation":"

An array of one or more DNS aliases currently associated with the specified file system.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Present if there are more DNS aliases than returned in the response (String). You can use the NextToken value in a later request to fetch additional descriptions.

" + } + }, + "documentation":"

The response object for the DescribeFileSystemAliases operation.

" + }, "DescribeFileSystemsRequest":{ "type":"structure", "members":{ @@ -1304,6 +1467,38 @@ "min":1, "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$" }, + "DisassociateFileSystemAliasesRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "Aliases" + ], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Specifies the file system from which to disassociate the DNS aliases.

" + }, + "Aliases":{ + "shape":"AlternateDNSNames", + "documentation":"

An array of one or more DNS alias names to disassociate, or remove, from the file system.

" + } + }, + "documentation":"

The request object containing the DNS aliases to disassociate from an Amazon FSx for Windows File Server file system.

" + }, + "DisassociateFileSystemAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{ + "shape":"Aliases", + "documentation":"

An array of one or more DNS aliases that Amazon FSx is attempting to disassociate from the file system.

" + } + }, + "documentation":"

The system-generated response showing the DNS aliases that Amazon FSx is attempting to disassociate from the file system. Use the DescribeFileSystemAliases operation to monitor the status of the aliases that Amazon FSx is removing from the file system.
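Taken together, the DescribeFileSystemAliases and DisassociateFileSystemAliases shapes describe a simple list-then-remove workflow. Below is a minimal sketch under the assumption that the generated FSx client exposes matching describeFileSystemAliases and disassociateFileSystemAliases methods, and that the Aliases list in the response carries Alias objects with a name() accessor (as in the published FSx API); the file system ID and alias name are placeholders.

```java
import java.util.ArrayList;
import java.util.List;

import software.amazon.awssdk.services.fsx.FSxClient;
import software.amazon.awssdk.services.fsx.model.DescribeFileSystemAliasesRequest;
import software.amazon.awssdk.services.fsx.model.DescribeFileSystemAliasesResponse;
import software.amazon.awssdk.services.fsx.model.DisassociateFileSystemAliasesRequest;

public class FileSystemAliasWorkflow {
    public static void main(String[] args) {
        String fileSystemId = "fs-0123456789abcdef0"; // placeholder file system ID

        try (FSxClient fsx = FSxClient.create()) {
            // Page through all DNS aliases currently associated with the file system.
            List<String> aliasNames = new ArrayList<>();
            String nextToken = null;
            do {
                DescribeFileSystemAliasesResponse page = fsx.describeFileSystemAliases(
                        DescribeFileSystemAliasesRequest.builder()
                                .fileSystemId(fileSystemId)
                                .maxResults(50)      // must be greater than 0
                                .nextToken(nextToken)
                                .build());
                // Alias is assumed to be a structure exposing name(), as in the published FSx API.
                page.aliases().forEach(alias -> aliasNames.add(alias.name()));
                nextToken = page.nextToken();        // null once the last page is returned
            } while (nextToken != null);
            System.out.println("Current aliases: " + aliasNames);

            // Remove one alias; the response lists the aliases FSx is now disassociating.
            fsx.disassociateFileSystemAliases(DisassociateFileSystemAliasesRequest.builder()
                    .fileSystemId(fileSystemId)
                    .aliases("accounting.example.com")
                    .build());
        }
    }
}
```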

" + }, "DnsIps":{ "type":"list", "member":{"shape":"IpAddress"}, @@ -1351,7 +1546,7 @@ "FailureDetails":{"shape":"FileSystemFailureDetails"}, "StorageCapacity":{ "shape":"StorageCapacity", - "documentation":"

The storage capacity of the file system in gigabytes (GB).

" + "documentation":"

The storage capacity of the file system in gibibytes (GiB).

" }, "StorageType":{ "shape":"StorageType", @@ -2089,7 +2284,7 @@ }, "StorageCapacity":{ "shape":"StorageCapacity", - "documentation":"

Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server file system. Specifies the storage capacity target value, GiB, for the file system you're updating. The storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system needs to have at least 16 MB/s of throughput capacity. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress. For more information, see Managing Storage Capacity.

" + "documentation":"

Use this parameter to increase the storage capacity of an Amazon FSx file system. Specifies the storage capacity target value, in GiB, for the file system that you're updating. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress.

For Windows file systems, the storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system must have at least 16 MB/s of throughput capacity.

For Lustre file systems, the storage capacity target value can be the following:

  • For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.

  • For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12 MB/s/TiB file systems and multiples of 1800 GiB for 40 MB/s/TiB file systems. The values must be greater than the current storage capacity.

  • For SCRATCH_1 file systems, you cannot increase the storage capacity.

For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide.
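To make the 10 percent rule for Windows file systems concrete, here is a hedged sketch that computes a valid target value and submits the increase; it assumes the standard generated updateFileSystem method and that the current capacity has already been read (for example, from DescribeFileSystems). A Lustre file system would instead need a target that respects the step sizes listed above.

```java
import software.amazon.awssdk.services.fsx.FSxClient;
import software.amazon.awssdk.services.fsx.model.UpdateFileSystemRequest;

public class IncreaseStorageCapacity {
    public static void main(String[] args) {
        String fileSystemId = "fs-0123456789abcdef0"; // placeholder
        int currentCapacityGiB = 300;                 // e.g. read from DescribeFileSystems

        // Windows rule from the docs above: the target must be at least 10% above the current value.
        int targetCapacityGiB = (int) Math.ceil(currentCapacityGiB * 1.10);

        try (FSxClient fsx = FSxClient.create()) {
            fsx.updateFileSystem(UpdateFileSystemRequest.builder()
                    .fileSystemId(fileSystemId)
                    .storageCapacity(targetCapacityGiB) // rejected if another increase is in progress
                    .build());
        }
    }
}
```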

" }, "WindowsConfiguration":{ "shape":"UpdateFileSystemWindowsConfiguration", @@ -2204,7 +2399,8 @@ "CopyTagsToBackups":{ "shape":"Flag", "documentation":"

A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

" - } + }, + "Aliases":{"shape":"Aliases"} }, "documentation":"

The configuration for this Microsoft Windows file system.

" } diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 60b73d43c5f2..baefd6dfe4ba 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamelift/src/main/resources/codegen-resources/service-2.json b/services/gamelift/src/main/resources/codegen-resources/service-2.json index 6487a6e742e2..3b533c0e7223 100755 --- a/services/gamelift/src/main/resources/codegen-resources/service-2.json +++ b/services/gamelift/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations

" + "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations
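The acceptance flow described above maps onto a single call against the generated GameLift client. A minimal sketch, assuming the ticket ID and player IDs were obtained from a REQUIRES_ACCEPTANCE notification; all identifiers are placeholders.

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.AcceptMatchRequest;
import software.amazon.awssdk.services.gamelift.model.AcceptanceType;

public class AcceptProposedMatch {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            // Only valid while the ticket is in REQUIRES_ACCEPTANCE.
            gameLift.acceptMatch(AcceptMatchRequest.builder()
                    .ticketId("ticket-1234")            // placeholder ticket ID
                    .playerIds("player-1", "player-2")  // players responding to the proposed match
                    .acceptanceType(AcceptanceType.ACCEPT)
                    .build());
        }
    }
}
```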

" }, "ClaimGameServer":{ "name":"ClaimGameServer", @@ -172,7 +172,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.

To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.

To track the progress of matchmaking tickets, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. An alternative method, continuously poling ticket status with DescribeMatchmaking, should only be used for games in development with low matchmaking usage.

Learn more

Design a FlexMatch Matchmaker

Set Up FlexMatch Event Notification

Related operations

" + "documentation":"

Defines a new matchmaking configuration for use with FlexMatch. Whether you are using FlexMatch with GameLift hosting or as a standalone matchmaking service, the matchmaking configuration sets out rules for matching players and forming teams. If you're also using GameLift hosting, it defines how to start game sessions for each match. Your matchmaking system can use multiple configurations to handle different game scenarios. All matchmaking requests (StartMatchmaking or StartMatchBackfill) identify the matchmaking configuration to use and provide player attributes consistent with that configuration.

To create a matchmaking configuration, you must provide the following: configuration name and FlexMatch mode (with or without GameLift hosting); a rule set that specifies how to evaluate players and find acceptable matches; whether player acceptance is required; and the maximum time allowed for a matchmaking attempt. When using FlexMatch with GameLift hosting, you also need to identify the game session queue to use when starting a game session for the match.

In addition, you must set up an Amazon Simple Notification Service (SNS) to receive matchmaking notifications, and provide the topic ARN in the matchmaking configuration. An alternative method, continuously polling ticket status with DescribeMatchmaking, is only suitable for games in development with low matchmaking usage.

Learn more

FlexMatch Developer Guide

Design a FlexMatch Matchmaker

Set Up FlexMatch Event Notification

Related operations
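A hedged sketch of the minimum configuration described above, using the new STANDALONE mode so that no game session queue is required. The flexMatchMode(...) setter is assumed to be generated from the FlexMatchMode member added in this model revision; the names and ARN are placeholders.

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.CreateMatchmakingConfigurationRequest;
import software.amazon.awssdk.services.gamelift.model.FlexMatchMode;

public class CreateStandaloneMatchmaker {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            gameLift.createMatchmakingConfiguration(CreateMatchmakingConfigurationRequest.builder()
                    .name("standalone-matchmaker")
                    .ruleSetName("my-rule-set")              // must exist in the same Region
                    .requestTimeoutSeconds(120)
                    .acceptanceRequired(false)
                    .flexMatchMode(FlexMatchMode.STANDALONE) // no GameSessionQueueArns needed
                    .notificationTarget("arn:aws:sns:us-west-2:111122223333:matchmaking-events") // placeholder SNS topic ARN
                    .build());
        }
    }
}
```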

" }, "CreateMatchmakingRuleSet":{ "name":"CreateMatchmakingRuleSet", @@ -188,7 +188,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

" + "documentation":"

Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a MatchmakingConfiguration.

To create a matchmaking rule set, provide a unique rule set name and the rule set body in JSON format. Rule sets must be defined in the same Region as the matchmaking configuration they are used with.

Since matchmaking rule sets cannot be edited, it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet before creating a new rule set.

Learn more

Related operations

" }, "CreatePlayerSession":{ "name":"CreatePlayerSession", @@ -391,7 +391,7 @@ {"shape":"NotFoundException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related operations

" + "documentation":"

Deletes an existing matchmaking rule set. To delete the rule set, provide the rule set name. Rule sets cannot be deleted if they are currently being used by a matchmaking configuration.

Learn more

Related operations

" }, "DeleteScalingPolicy":{ "name":"DeleteScalingPolicy", @@ -741,7 +741,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously poling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations

" + "documentation":"

Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including--after a successful match is made--connection information for the resulting new game session.

To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.

This operation is not designed to be continually called to track matchmaking ticket status. This practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, set up an Amazon Simple Notification Service (SNS) to receive notifications, and provide the topic ARN in the matchmaking configuration. Continuously polling ticket status with DescribeMatchmaking should only be used for games in development with low matchmaking usage.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

Related operations
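For occasional status checks (as opposed to polling), a minimal sketch of retrieving up to 10 tickets by ID with the generated client; the ticket ID is a placeholder.

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.DescribeMatchmakingRequest;

public class DescribeTickets {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            // Up to 10 ticket IDs per call; use SNS notifications rather than polling for status.
            gameLift.describeMatchmaking(DescribeMatchmakingRequest.builder()
                    .ticketIds("ticket-5678") // placeholder ticket ID
                    .build())
                    .ticketList()
                    .forEach(t -> System.out.println(t.ticketId() + " -> " + t.status()));
        }
    }
}
```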

" }, "DescribeMatchmakingConfigurations":{ "name":"DescribeMatchmakingConfigurations", @@ -756,7 +756,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves the details of FlexMatch matchmaking configurations.

This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting Up FlexMatch Matchmakers

Related operations

" + "documentation":"

Retrieves the details of FlexMatch matchmaking configurations.

This operation offers the following options: (1) retrieve all matchmaking configurations, (2) retrieve configurations for a specified list, or (3) retrieve all configurations that use a specified rule set name. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a configuration is returned for each requested name. When specifying a list of names, only configurations that currently exist are returned.

Learn more

Setting Up FlexMatch Matchmakers

Related operations

" }, "DescribeMatchmakingRuleSets":{ "name":"DescribeMatchmakingRuleSets", @@ -772,7 +772,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

" + "documentation":"

Retrieves the details for FlexMatch matchmaking rule sets. You can request all existing rule sets for the Region, or provide a list of one or more rule set names. When requesting multiple items, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a rule set is returned for each requested name.

Learn more

Related operations

" }, "DescribePlayerSessions":{ "name":"DescribePlayerSessions", @@ -1153,7 +1153,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once the a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Learn more

Backfill Existing Games with FlexMatch

How GameLift FlexMatch Works

Related operations

" + "documentation":"

Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.

To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed.

The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.

Learn more

Backfill Existing Games with FlexMatch

How GameLift FlexMatch Works

Related operations
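A minimal backfill sketch, under the assumptions that the current players have already been rebuilt from the game session's MatchmakerData and that the identifiers shown are placeholders.

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.Player;
import software.amazon.awssdk.services.gamelift.model.StartMatchBackfillRequest;

public class BackfillOpenSlots {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            // In practice, rebuild the current players from the game session's MatchmakerData.
            Player currentPlayer = Player.builder()
                    .playerId("player-1")
                    .team("red")                        // placeholder team assignment
                    .build();

            gameLift.startMatchBackfill(StartMatchBackfillRequest.builder()
                    .ticketId("backfill-ticket-1")      // unique, caller-supplied ticket ID
                    .configurationName("my-matchmaker") // same matchmaker that built the original match
                    .gameSessionArn("arn:aws:gamelift:us-west-2::gamesession/fleet-123/gsess-456") // placeholder
                    .players(currentPlayer)
                    .build());
        }
    }
}
```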

" }, "StartMatchmaking":{ "name":"StartMatchmaking", @@ -1169,7 +1169,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED.

Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.

Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:

  1. Your client code submits a StartMatchmaking request for one or more players and tracks the status of the request ticket.

  2. FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.

  3. If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch for more details).

  4. Once a match is proposed and accepted, the matchmaking tickets move into status PLACING. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.

  5. When the match is successfully placed, the matchmaking tickets move into COMPLETED status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

FlexMatch Integration Roadmap

How GameLift FlexMatch Works

Related operations

" + "documentation":"

Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. If you're also using GameLift hosting, a new game session is started for the matched players. Each matchmaking request identifies one or more players to find a match for, and specifies the type of match to build, including the team configuration and the rules for an acceptable match. When a matchmaking request identifies a group of players who want to play together, FlexMatch finds additional players to fill the match. Match type, rules, and other features are defined in a MatchmakingConfiguration.

To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. For each player, you must also include the player attribute values that are required by the matchmaking configuration (in the rule set). If successful, a matchmaking ticket is returned with status set to QUEUED.

Track the status of the ticket to respond as needed. If you're also using GameLift hosting, a successfully completed ticket contains game session connection information. Ticket status updates are tracked using event notification through Amazon Simple Notification Service (SNS), which is defined in the matchmaking configuration.

Learn more

Add FlexMatch to a Game Client

Set Up FlexMatch Event Notification

FlexMatch Integration Roadmap

How GameLift FlexMatch Works

Related operations
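A hedged sketch of the request described above: one player with a single attribute that the configuration's rule set is assumed to expect, submitted with a caller-supplied ticket ID. Status tracking is left to the SNS notifications configured on the matchmaker; the names are placeholders.

```java
import java.util.Map;

import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.AttributeValue;
import software.amazon.awssdk.services.gamelift.model.Player;
import software.amazon.awssdk.services.gamelift.model.StartMatchmakingRequest;
import software.amazon.awssdk.services.gamelift.model.StartMatchmakingResponse;

public class RequestMatch {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            // The attribute names and types must match what the configuration's rule set expects.
            Player player = Player.builder()
                    .playerId("player-1")
                    .playerAttributes(Map.of("skill", AttributeValue.builder().n(23.0).build()))
                    .build();

            StartMatchmakingResponse response = gameLift.startMatchmaking(StartMatchmakingRequest.builder()
                    .ticketId("ticket-5678")                    // unique, caller-supplied ticket ID
                    .configurationName("standalone-matchmaker") // placeholder configuration name
                    .players(player)
                    .build());

            // The ticket starts out QUEUED; further status changes arrive via the configured SNS topic.
            System.out.println(response.matchmakingTicket().status());
        }
    }
}
```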

" }, "StopFleetActions":{ "name":"StopFleetActions", @@ -1217,7 +1217,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations

" + "documentation":"

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the operation is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations
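Cancelling a ticket (or turning off automatic backfill for one game session, given its backfill ticket ID) is a single call; the ticket ID below is a placeholder.

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.StopMatchmakingRequest;

public class CancelTicket {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            // Cancels an in-progress matchmaking or backfill ticket; its status becomes CANCELLED.
            gameLift.stopMatchmaking(StopMatchmakingRequest.builder()
                    .ticketId("ticket-5678") // placeholder ticket ID
                    .build());
        }
    }
}
```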

" }, "SuspendGameServerGroup":{ "name":"SuspendGameServerGroup", @@ -1436,7 +1436,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch Matchmaker

Related operations

" + "documentation":"

Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings.

Learn more

Design a FlexMatch Matchmaker

Related operations

" }, "UpdateRuntimeConfiguration":{ "name":"UpdateRuntimeConfiguration", @@ -1484,7 +1484,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.

Learn more

Related operations

" + "documentation":"

Validates the syntax of a matchmaking rule or rule set. This operation checks that the rule set is using syntactically correct JSON and that it conforms to allowed property expressions. To validate syntax, provide a rule set JSON string.

Learn more

Related operations

" } }, "shapes":{ @@ -1820,7 +1820,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an S3 bucket that you own. The storage location must specify an S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your S3 bucket. The S3 bucket and your new build must be in the same Region.

" + "documentation":"

The location where your game build files are stored. Use this parameter only when creating a build using files that are stored in an S3 bucket that you own. Identify an S3 bucket name and key, which must be in the same Region where you're creating a build. This parameter must also specify the ARN for an IAM role that you've set up to give Amazon GameLift access to your S3 bucket. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

" }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -1924,7 +1924,7 @@ }, "InstanceRoleArn":{ "shape":"NonEmptyString", - "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" + "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. Fleets with an instance role ARN allow applications that are running on the fleet's instances to assume the role. Learn more about using on-box credentials for your game servers at Access external resources from a game server. To call this operation with an instance role ARN, you must have IAM PassRole permissions. See IAM policy examples for GameLift.

" }, "CertificateConfiguration":{ "shape":"CertificateConfiguration", @@ -1996,7 +1996,7 @@ }, "VpcSubnets":{ "shape":"VpcSubnets", - "documentation":"

A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all GameLift FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly

" + "documentation":"

A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all GameLift FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly.

" }, "Tags":{ "shape":"TagList", @@ -2107,7 +2107,6 @@ "type":"structure", "required":[ "Name", - "GameSessionQueueArns", "RequestTimeoutSeconds", "AcceptanceRequired", "RuleSetName" @@ -2123,7 +2122,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -2131,11 +2130,11 @@ }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", - "documentation":"

A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + "documentation":"

A flag that determines whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE to indicate when a completed potential match is waiting for player acceptance.

" }, "RuleSetName":{ "shape":"MatchmakingRuleSetName", @@ -2147,7 +2146,7 @@ }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -2155,15 +2154,19 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + "documentation":"

The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

" + }, + "FlexMatchMode":{ + "shape":"FlexMatchMode", + "documentation":"

Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.

  • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

  • WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift queue to start a game session for the match.

" }, "Tags":{ "shape":"TagList", @@ -2292,7 +2295,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" + "documentation":"

The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region where you are creating a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.

" }, "ZipFile":{ "shape":"ZipBlob", @@ -2417,7 +2420,7 @@ }, "DeleteOption":{ "shape":"GameServerGroupDeleteOption", - "documentation":"

The type of delete to perform. Options include the following:

  • SAFE_DELETE – Terminates the game server group and EC2 Auto Scaling group only when it has no game servers that are in UTILIZED status.

  • FORCE_DELETE – Terminates the game server group, including all active game servers regardless of their utilization status, and the EC2 Auto Scaling group.

  • RETAIN – Does a safe delete of the game server group but retains the EC2 Auto Scaling group as is.

" + "documentation":"

The type of delete to perform. Options include the following:

  • SAFE_DELETE – (default) Terminates the game server group and EC2 Auto Scaling group only when it has no game servers that are in UTILIZED status.

  • FORCE_DELETE – Terminates the game server group, including all active game servers regardless of their utilization status, and the EC2 Auto Scaling group.

  • RETAIN – Does a safe delete of the game server group but retains the EC2 Auto Scaling group as is.

" } } }, @@ -3395,6 +3398,14 @@ "c5.12xlarge", "c5.18xlarge", "c5.24xlarge", + "c5a.large", + "c5a.xlarge", + "c5a.2xlarge", + "c5a.4xlarge", + "c5a.8xlarge", + "c5a.12xlarge", + "c5a.16xlarge", + "c5a.24xlarge", "r3.large", "r3.xlarge", "r3.2xlarge", @@ -3414,6 +3425,14 @@ "r5.12xlarge", "r5.16xlarge", "r5.24xlarge", + "r5a.large", + "r5a.xlarge", + "r5a.2xlarge", + "r5a.4xlarge", + "r5a.8xlarge", + "r5a.12xlarge", + "r5a.16xlarge", + "r5a.24xlarge", "m3.medium", "m3.large", "m3.xlarge", @@ -3430,7 +3449,15 @@ "m5.8xlarge", "m5.12xlarge", "m5.16xlarge", - "m5.24xlarge" + "m5.24xlarge", + "m5a.large", + "m5a.xlarge", + "m5a.2xlarge", + "m5a.4xlarge", + "m5a.8xlarge", + "m5a.12xlarge", + "m5a.16xlarge", + "m5a.24xlarge" ] }, "Event":{ @@ -3608,7 +3635,7 @@ }, "InstanceRoleArn":{ "shape":"NonEmptyString", - "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN from the IAM dashboard in the AWS Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server.

" + "documentation":"

A unique identifier for an AWS IAM role that manages access to your AWS services.

" }, "CertificateConfiguration":{ "shape":"CertificateConfiguration", @@ -3720,6 +3747,13 @@ "type":"list", "member":{"shape":"FleetUtilization"} }, + "FlexMatchMode":{ + "type":"string", + "enum":[ + "STANDALONE", + "WITH_QUEUE" + ] + }, "Float":{"type":"float"}, "FreeText":{"type":"string"}, "GameProperty":{ @@ -4132,7 +4166,7 @@ }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" + "documentation":"

Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition to the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

" } }, "documentation":"

Properties describing a game session.

A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

" @@ -4166,7 +4200,7 @@ "documentation":"

A collection of player session IDs, one for each player ID that was included in the original matchmaking request.

" } }, - "documentation":"

Connection information for the new game session that is created with matchmaking. (with StartMatchmaking). Once a match is set, the FlexMatch engine places the match and creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.

" + "documentation":"

Connection information for a new game session that is created in response to a StartMatchmaking request. Once a match is made, the FlexMatch engine creates a new game session for it. This information, including the game session endpoint and player sessions for each player in the original matchmaking request, is added to the MatchmakingTicket, which can be retrieved by calling DescribeMatchmaking.

" }, "GameSessionData":{ "type":"string", @@ -4276,7 +4310,7 @@ }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" + "documentation":"

Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

" } }, "documentation":"

Object that describes a StartGameSessionPlacement request. This object includes the full details of the original request plus the current status and start/end time stamps.

Game session placement-related operations include:

" @@ -4927,7 +4961,7 @@ }, "ConfigurationArn":{ "shape":"MatchmakingConfigurationArn", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift configuration ARN, the resource ID matches the Name value.

" }, "Description":{ "shape":"NonZeroAndMaxString", @@ -4935,7 +4969,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. GameLift uses the listed queues when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. This property is not set when FlexMatchMode is set to STANDALONE.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -4943,11 +4977,11 @@ }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", - "documentation":"

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + "documentation":"

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. When this option is enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE to indicate when a completed potential match is waiting for player acceptance.

" }, "RuleSetName":{ "shape":"MatchmakingIdStringModel", @@ -4963,7 +4997,7 @@ }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used when FlexMatchMode is set to STANDALONE.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -4975,15 +5009,19 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode is set to STANDALONE.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used when FlexMatchMode is set to STANDALONE.

" }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + "documentation":"

The method used to backfill game sessions created with this matchmaking configuration. MANUAL indicates that the game makes backfill requests or does not use the match backfill feature. AUTOMATIC indicates that GameLift creates StartMatchBackfill requests whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

" + }, + "FlexMatchMode":{ + "shape":"FlexMatchMode", + "documentation":"

Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.

  • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

  • WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift queue to start a game session for the match.

" } }, "documentation":"

Guidelines for use with FlexMatch to match players into games. All matchmaking requests must specify a matchmaking configuration.

" @@ -5055,7 +5093,7 @@ "documentation":"

The time stamp indicating when this data object was created. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" } }, - "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

  • Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.

  • Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.

  • Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. or may describe an entire group--such as all teams must be evenly matched or have at least one player in a certain role.

  • Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.

" + "documentation":"

Set of rule statements, used with FlexMatch, that determine how to build your player matches. Each rule set describes a type of group to be created and defines the parameters for acceptable player matches. Rule sets are used in MatchmakingConfiguration objects.

A rule set may define the following elements for a match. For detailed information and examples showing how to construct a rule set, see Build a FlexMatch Rule Set.

  • Teams -- Required. A rule set must define one or multiple teams for the match and set minimum and maximum team sizes. For example, a rule set might describe a 4x4 match that requires all eight slots to be filled.

  • Player attributes -- Optional. These attributes specify a set of player characteristics to evaluate when looking for a match. Matchmaking requests that use a rule set with player attributes must provide the corresponding attribute values. For example, an attribute might specify a player's skill or level.

  • Rules -- Optional. Rules define how to evaluate potential players for a match based on player attributes. A rule might specify minimum requirements for individual players, teams, or entire matches. For example, a rule might require each player to meet a certain skill level, each team to have at least one player in a certain role, or the match to have a minimum average skill level. A rule may also describe an entire group--such as requiring all teams to be evenly matched or to have at least one player in a certain role.

  • Expansions -- Optional. Expansions allow you to relax the rules after a period of time when no acceptable matches are found. This feature lets you balance getting players into games in a reasonable amount of time instead of making them wait indefinitely for the best possible match. For example, you might use an expansion to increase the maximum skill variance between players after 30 seconds.
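
For illustration only, a minimal sketch of creating such a rule set with the v2 GameLift client generated from this model; the rule set name, body fields, and team sizes are placeholder assumptions, not values taken from this diff:

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.CreateMatchmakingRuleSetRequest;
import software.amazon.awssdk.services.gamelift.model.CreateMatchmakingRuleSetResponse;

public class CreateRuleSetSketch {
    public static void main(String[] args) {
        // A minimal rule set body: one team of 4 to 8 players, no player attributes,
        // custom rules, or expansions.
        String ruleSetBody = "{"
                + "\"name\": \"simple_match\","
                + "\"ruleLanguageVersion\": \"1.0\","
                + "\"teams\": [{\"name\": \"players\", \"minPlayers\": 4, \"maxPlayers\": 8}]"
                + "}";

        try (GameLiftClient gameLift = GameLiftClient.create()) {
            CreateMatchmakingRuleSetResponse response = gameLift.createMatchmakingRuleSet(
                    CreateMatchmakingRuleSetRequest.builder()
                            .name("simple-rule-set")       // placeholder rule set name
                            .ruleSetBody(ruleSetBody)
                            .build());
            System.out.println(response.ruleSet().ruleSetArn());
        }
    }
}
```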

" }, "MatchmakingRuleSetArn":{ "type":"string", @@ -5119,7 +5157,7 @@ }, "GameSessionConnectionInfo":{ "shape":"GameSessionConnectionInfo", - "documentation":"

Identifier and connection information of the game session created for the match. This information is added to the ticket only after the matchmaking request has been successfully completed.

" + "documentation":"

Identifier and connection information of the game session created for the match. This information is added to the ticket only after the matchmaking request has been successfully completed. This parameter is not set when FlexMatch is being used without GameLift hosting.

" }, "EstimatedWaitTime":{ "shape":"WholeNumber", @@ -5974,7 +6012,6 @@ "type":"structure", "required":[ "ConfigurationName", - "GameSessionArn", "Players" ], "members":{ @@ -5992,7 +6029,7 @@ }, "Players":{ "shape":"PlayerList", - "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

  • PlayerID, PlayerAttributes, Team -\\\\- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.

  • LatencyInMs -\\\\- If the matchmaker uses player latency, include a latency value, in milliseconds, for the Region that the game session is currently in. Do not include latency values for any other Region.

" + "documentation":"

Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.

  • PlayerID, PlayerAttributes, Team -\\\\- This information is maintained in the GameSession object, MatchmakerData property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.

  • LatencyInMs -\\\\- If the matchmaker uses player latency, include a latency value, in milliseconds, for the Region that the game session is currently in. Do not include latency values for any other Region.
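
As a hedged sketch of how this player data might be supplied through the generated v2 client (player IDs, team, attribute names, latency values, and the game session ARN are placeholders):

```java
import java.util.Map;
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.AttributeValue;
import software.amazon.awssdk.services.gamelift.model.Player;
import software.amazon.awssdk.services.gamelift.model.StartMatchBackfillRequest;
import software.amazon.awssdk.services.gamelift.model.StartMatchBackfillResponse;

public class BackfillSketch {
    public static void main(String[] args) {
        // Player data would normally be rebuilt from the game session's MatchmakerData.
        Player currentPlayer = Player.builder()
                .playerId("player-1")                 // placeholder
                .team("blue")                         // placeholder
                .playerAttributes(Map.of("skill", AttributeValue.builder().n(23.0).build()))
                .latencyInMs(Map.of("us-west-2", 50)) // only the game session's current Region
                .build();

        try (GameLiftClient gameLift = GameLiftClient.create()) {
            StartMatchBackfillResponse response = gameLift.startMatchBackfill(
                    StartMatchBackfillRequest.builder()
                            .configurationName("my-matchmaking-config")  // placeholder
                            .gameSessionArn("arn:aws:gamelift:us-west-2::gamesession/example") // placeholder; optional for standalone FlexMatch
                            .players(currentPlayer)
                            .build());
            System.out.println(response.matchmakingTicket().status());
        }
    }
}
```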

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6595,7 +6632,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. These queues are used when placing game sessions for matches that are created with this matchmaking configuration. Queues can be located in any Region.

" + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Queues can be located in any Region. Queues are used to start new GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

" }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -6603,11 +6640,11 @@ }, "AcceptanceTimeoutSeconds":{ "shape":"MatchmakingAcceptanceTimeoutInteger", - "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match. If any player rejects the match or fails to accept before the timeout, the ticket continues to look for an acceptable match.

" + "documentation":"

The length of time (in seconds) to wait for players to accept a proposed match, if acceptance is required. If any player rejects the match or fails to accept before the timeout, the tickets are returned to the ticket pool and continue to be evaluated for an acceptable match.

" }, "AcceptanceRequired":{ "shape":"BooleanModel", - "documentation":"

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE.

" + "documentation":"

A flag that indicates whether a match that was created with this configuration must be accepted by the matched players. To require acceptance, set to TRUE. With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE to indicate when a completed potential match is waiting for player acceptance.
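
When acceptance is required, a ticket stays in REQUIRES_ACCEPTANCE until every matched player responds, typically by the game backend calling AcceptMatch. A minimal sketch, with placeholder ticket and player IDs:

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.AcceptMatchRequest;
import software.amazon.awssdk.services.gamelift.model.AcceptanceType;

public class AcceptMatchSketch {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            // Report acceptance for the players this backend is responsible for.
            gameLift.acceptMatch(AcceptMatchRequest.builder()
                    .ticketId("ticket-1234")             // placeholder ticket in REQUIRES_ACCEPTANCE
                    .playerIds("player-1", "player-2")   // placeholder player IDs
                    .acceptanceType(AcceptanceType.ACCEPT)
                    .build());
        }
    }
}
```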

" }, "RuleSetName":{ "shape":"MatchmakingRuleSetName", @@ -6615,11 +6652,11 @@ }, "NotificationTarget":{ "shape":"SnsArnStringModel", - "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" + "documentation":"

An SNS topic ARN that is set up to receive matchmaking notifications. See Setting up Notifications for Matchmaking for more information.

" }, "AdditionalPlayerCount":{ "shape":"WholeNumber", - "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match.

" + "documentation":"

The number of player slots in a match to keep open for future players. For example, assume that the configuration's rule set specifies a match for a single 12-person team. If the additional player count is set to 2, only 10 players are initially selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "CustomEventData":{ "shape":"CustomEventData", @@ -6627,15 +6664,19 @@ }, "GameProperties":{ "shape":"GamePropertyList", - "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom properties for a game session, formatted as key-value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "GameSessionData":{ "shape":"GameSessionData", - "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match.

" + "documentation":"

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE.

" }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch.

" + "documentation":"

The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a StartMatchBackfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

" + }, + "FlexMatchMode":{ + "shape":"FlexMatchMode", + "documentation":"

Indicates whether this matchmaking configuration is being used with GameLift hosting or as a standalone matchmaking solution.

  • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

  • WITH_QUEUE - FlexMatch forms matches and uses the specified GameLift queue to start a game session for the match.
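
A minimal sketch of switching an existing configuration to standalone mode with the generated v2 client; the configuration name is a placeholder, and in STANDALONE mode the queue ARNs, automatic backfill, and game session properties described above are simply not set:

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.FlexMatchMode;
import software.amazon.awssdk.services.gamelift.model.UpdateMatchmakingConfigurationRequest;

public class StandaloneModeSketch {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            // In STANDALONE mode, match results arrive via MatchmakingSucceeded events;
            // queues, automatic backfill, and game session properties are not used.
            gameLift.updateMatchmakingConfiguration(UpdateMatchmakingConfigurationRequest.builder()
                    .name("my-matchmaking-config")           // placeholder configuration name
                    .flexMatchMode(FlexMatchMode.STANDALONE)
                    .build());
        }
    }
}
```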

" } }, "documentation":"

Represents the input for a request operation.

" @@ -6696,7 +6737,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

" + "documentation":"

The Amazon S3 location of your Realtime scripts. The storage location must specify the S3 bucket name, the zip file name (the \"key\"), and an IAM role ARN that allows Amazon GameLift to access the S3 storage location. The S3 bucket must be in the same Region as the script you're updating. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version. To call this operation with a storage location, you must have IAM PassRole permission. For more details on IAM roles and PassRole permissions, see Set up a role for GameLift access.
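
A hedged sketch of supplying such a storage location through the generated v2 client; the script ID, bucket, key, and role ARN are placeholders, and the caller must be allowed to pass the role (iam:PassRole):

```java
import software.amazon.awssdk.services.gamelift.GameLiftClient;
import software.amazon.awssdk.services.gamelift.model.S3Location;
import software.amazon.awssdk.services.gamelift.model.UpdateScriptRequest;

public class UpdateScriptSketch {
    public static void main(String[] args) {
        try (GameLiftClient gameLift = GameLiftClient.create()) {
            gameLift.updateScript(UpdateScriptRequest.builder()
                    .scriptId("script-1234")                 // placeholder script ID
                    .storageLocation(S3Location.builder()
                            .bucket("my-realtime-scripts")   // placeholder bucket in the same Region
                            .key("scripts/realtime.zip")     // placeholder zip file key
                            .roleArn("arn:aws:iam::123456789012:role/GameLiftS3Access") // placeholder role
                            .build())
                    .build());
        }
    }
}
```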

" }, "ZipFile":{ "shape":"ZipBlob", diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 08544e5d0725..11ebc3fffa5b 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index 0813bfb3a54c..c9a7e0725a7d 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json index e3a362e971ba..3251e58891c2 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json +++ b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json @@ -43,7 +43,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. To see an AWS CLI example of creating an accelerator, scroll down to Example.

If you bring your own IP address ranges to AWS Global Accelerator (BYOIP), you can assign IP addresses from your own pool to your accelerator as the static IP address entry points. Only one IP address from each of your IP address ranges can be used for each accelerator.

You must specify the US West (Oregon) Region to create or update accelerators.

" + "documentation":"

Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers. To see an AWS CLI example of creating an accelerator, scroll down to Example.

Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the US West (Oregon) Region to create or update accelerators.

" }, "CreateEndpointGroup":{ "name":"CreateEndpointGroup", @@ -62,7 +62,7 @@ {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one AWS Region. To see an AWS CLI example of creating an endpoint group, scroll down to Example.

" + "documentation":"

Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one AWS Region. A resource must be valid and active when you add it as an endpoint.

To see an AWS CLI example of creating an endpoint group, scroll down to Example.

" }, "CreateListener":{ "name":"CreateListener", @@ -341,7 +341,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"} ], - "documentation":"

Update an accelerator. To see an AWS CLI example of updating an accelerator, scroll down to Example.

You must specify the US West (Oregon) Region to create or update accelerators.

" + "documentation":"

Update an accelerator. To see an AWS CLI example of updating an accelerator, scroll down to Example.

Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the US West (Oregon) Region to create or update accelerators.

" }, "UpdateAcceleratorAttributes":{ "name":"UpdateAcceleratorAttributes", @@ -374,7 +374,7 @@ {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Update an endpoint group. To see an AWS CLI example of updating an endpoint group, scroll down to Example.

" + "documentation":"

Update an endpoint group. A resource must be valid and active when you add it as an endpoint.

To see an AWS CLI example of updating an endpoint group, scroll down to Example.

" }, "UpdateListener":{ "name":"UpdateListener", @@ -555,7 +555,7 @@ }, "Events":{ "shape":"ByoipCidrEvents", - "documentation":"

A history of status changes for an IP address range that that you bring to AWS Global Accelerator through bring your own IP address (BYOIP).

" + "documentation":"

A history of status changes for an IP address range that you bring to AWS Global Accelerator through bring your own IP address (BYOIP).

" } }, "documentation":"

Information about an IP address range that is provisioned for use with your AWS resources through bring your own IP address (BYOIP).

The following describes each BYOIP State that your IP address range can be in.

  • PENDING_PROVISIONING — You’ve submitted a request to provision an IP address range but it is not yet provisioned with AWS Global Accelerator.

  • READY — The address range is provisioned with AWS Global Accelerator and can be advertised.

  • PENDING_ADVERTISING — You’ve submitted a request for AWS Global Accelerator to advertise an address range but it is not yet being advertised.

  • ADVERTISING — The address range is being advertised by AWS Global Accelerator.

  • PENDING_WITHDRAWING — You’ve submitted a request to withdraw an address range from being advertised but it is still being advertised by AWS Global Accelerator.

  • PENDING_DEPROVISIONING — You’ve submitted a request to deprovision an address range from AWS Global Accelerator but it is still provisioned.

  • DEPROVISIONED — The address range is deprovisioned from AWS Global Accelerator.

  • FAILED_PROVISION — The request to provision the address range from AWS Global Accelerator was not successful. Please make sure that you provide all of the correct information, and try again. If the request fails a second time, contact AWS support.

  • FAILED_ADVERTISING — The request for AWS Global Accelerator to advertise the address range was not successful. Please make sure that you provide all of the correct information, and try again. If the request fails a second time, contact AWS support.

  • FAILED_WITHDRAW — The request to withdraw the address range from advertising by AWS Global Accelerator was not successful. Please make sure that you provide all of the correct information, and try again. If the request fails a second time, contact AWS support.

  • FAILED_DEPROVISION — The request to deprovision the address range from AWS Global Accelerator was not successful. Please make sure that you provide all of the correct information, and try again. If the request fails a second time, contact AWS support.

" @@ -648,7 +648,7 @@ }, "IpAddresses":{ "shape":"IpAddresses", - "documentation":"

Optionally, if you've added your own IP address pool to Global Accelerator, you can choose IP addresses from your own pool to use for the accelerator's static IP addresses. You can specify one or two addresses, separated by a comma. Do not include the /32 suffix.

If you specify only one IP address from your IP address range, Global Accelerator assigns a second static IP address for the accelerator from the AWS IP address pool.

For more information, see Bring Your Own IP Addresses (BYOIP) in the AWS Global Accelerator Developer Guide.

" + "documentation":"

Optionally, if you've added your own IP address pool to Global Accelerator (BYOIP), you can choose IP addresses from your own pool to use for the accelerator's static IP addresses when you create an accelerator. You can specify one or two addresses, separated by a comma. Do not include the /32 suffix.

Only one IP address from each of your IP address ranges can be used for each accelerator. If you specify only one IP address from your IP address range, Global Accelerator assigns a second static IP address for the accelerator from the AWS IP address pool.

Note that you can't update IP addresses for an existing accelerator. To change them, you must create a new accelerator with the new addresses.

For more information, see Bring Your Own IP Addresses (BYOIP) in the AWS Global Accelerator Developer Guide.
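
A minimal sketch, assuming one address from an already provisioned BYOIP range (all names and addresses are placeholders); the client is built against us-west-2 because accelerators can only be created or updated there:

```java
import java.util.UUID;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.globalaccelerator.GlobalAcceleratorClient;
import software.amazon.awssdk.services.globalaccelerator.model.CreateAcceleratorRequest;

public class ByoipAcceleratorSketch {
    public static void main(String[] args) {
        // Accelerators can only be created or updated through the US West (Oregon) endpoint.
        try (GlobalAcceleratorClient client = GlobalAcceleratorClient.builder()
                .region(Region.US_WEST_2)
                .build()) {
            client.createAccelerator(CreateAcceleratorRequest.builder()
                    .name("my-accelerator")           // placeholder name
                    .ipAddresses("198.51.100.10")     // placeholder address from a provisioned BYOIP range
                    .enabled(true)
                    .idempotencyToken(UUID.randomUUID().toString())
                    .build());
        }
    }
}
```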

" }, "Enabled":{ "shape":"GenericBoolean", @@ -688,7 +688,7 @@ }, "EndpointGroupRegion":{ "shape":"GenericString", - "documentation":"

The name of the AWS Region where the endpoint group is located. A listener can have only one endpoint group in a specific Region.

" + "documentation":"

The AWS Region where the endpoint group is located. A listener can have only one endpoint group in a specific Region.

" }, "EndpointConfigurations":{ "shape":"EndpointConfigurations", @@ -722,6 +722,10 @@ "shape":"IdempotencyToken", "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency—that is, the uniqueness—of the request.

", "idempotencyToken":true + }, + "PortOverrides":{ + "shape":"PortOverrides", + "documentation":"

Override specific listener ports used to route traffic to endpoints that are part of this endpoint group. For example, you can create a port override in which the listener receives user traffic on ports 80 and 443, but your accelerator routes that traffic to ports 1080 and 1443, respectively, on the endpoints.

For more information, see Port overrides in the AWS Global Accelerator Developer Guide.

" } } }, @@ -757,7 +761,7 @@ }, "ClientAffinity":{ "shape":"ClientAffinity", - "documentation":"

Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Clienty affinity gives you control over whether to always route each client to the same specific endpoint.

AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE, Global Accelerator uses the \"five-tuple\" (5-tuple) properties—source IP address, source port, destination IP address, destination port, and protocol—to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.

If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, Global Accelerator uses the \"two-tuple\" (2-tuple) properties— source (client) IP address and destination IP address—to select the hash value.

The default value is NONE.

" + "documentation":"

Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Client affinity gives you control over whether to always route each client to the same specific endpoint.

AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE, Global Accelerator uses the \"five-tuple\" (5-tuple) properties—source IP address, source port, destination IP address, destination port, and protocol—to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.

If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, Global Accelerator uses the \"two-tuple\" (2-tuple) properties— source (client) IP address and destination IP address—to select the hash value.

The default value is NONE.
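
A hedged sketch of creating a listener that pins clients to an endpoint by source IP; the accelerator ARN and port ranges are placeholders:

```java
import java.util.UUID;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.globalaccelerator.GlobalAcceleratorClient;
import software.amazon.awssdk.services.globalaccelerator.model.ClientAffinity;
import software.amazon.awssdk.services.globalaccelerator.model.CreateListenerRequest;
import software.amazon.awssdk.services.globalaccelerator.model.PortRange;
import software.amazon.awssdk.services.globalaccelerator.model.Protocol;

public class SourceIpAffinitySketch {
    public static void main(String[] args) {
        try (GlobalAcceleratorClient client = GlobalAcceleratorClient.builder()
                .region(Region.US_WEST_2)
                .build()) {
            // SOURCE_IP hashes only on client and destination IP, so a client keeps
            // hitting the same endpoint even when it reconnects from a different port.
            client.createListener(CreateListenerRequest.builder()
                    .acceleratorArn("arn:aws:globalaccelerator::123456789012:accelerator/example") // placeholder
                    .protocol(Protocol.TCP)
                    .portRanges(PortRange.builder().fromPort(80).toPort(80).build(),
                                PortRange.builder().fromPort(443).toPort(443).build())
                    .clientAffinity(ClientAffinity.SOURCE_IP)
                    .idempotencyToken(UUID.randomUUID().toString())
                    .build());
        }
    }
}
```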

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", @@ -905,7 +909,7 @@ "members":{ "EndpointId":{ "shape":"GenericString", - "documentation":"

An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID. For EC2 instances, this is the EC2 instance ID.

An Application Load Balancer can be either internal or internet-facing.

" + "documentation":"

An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID. For Amazon EC2 instances, this is the EC2 instance ID. A resource must be valid and active when you add it as an endpoint.

An Application Load Balancer can be either internal or internet-facing.

" }, "Weight":{ "shape":"EndpointWeight", @@ -916,7 +920,7 @@ "documentation":"

Indicates whether client IP address preservation is enabled for an Application Load Balancer endpoint. The value is true or false. The default value is true for new accelerators.

If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the Application Load Balancer endpoint fronted by the accelerator.

For more information, see Preserve Client IP Addresses in AWS Global Accelerator in the AWS Global Accelerator Developer Guide.

" } }, - "documentation":"

A complex type for endpoints.

" + "documentation":"

A complex type for endpoints. A resource must be valid and active when you add it as an endpoint.

" }, "EndpointConfigurations":{ "type":"list", @@ -963,7 +967,7 @@ }, "EndpointGroupRegion":{ "shape":"GenericString", - "documentation":"

The AWS Region that this endpoint group belongs.

" + "documentation":"

The AWS Region where the endpoint group is located.

" }, "EndpointDescriptions":{ "shape":"EndpointDescriptions", @@ -992,6 +996,10 @@ "ThresholdCount":{ "shape":"ThresholdCount", "documentation":"

The number of consecutive health checks required to set the state of a healthy endpoint to unhealthy, or to set an unhealthy endpoint to healthy. The default value is 3.

" + }, + "PortOverrides":{ + "shape":"PortOverrides", + "documentation":"

Allows you to override the destination ports used to route traffic to an endpoint. Using a port override lets you map a list of external destination ports (that your users send traffic to) to a list of internal destination ports that you want an application endpoint to receive traffic on.

" } }, "documentation":"

A complex type for the endpoint group. An AWS Region can have only one endpoint group for a specific listener.

" @@ -1284,7 +1292,7 @@ }, "ClientAffinity":{ "shape":"ClientAffinity", - "documentation":"

Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Clienty affinity gives you control over whether to always route each client to the same specific endpoint.

AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE, Global Accelerator uses the \"five-tuple\" (5-tuple) properties—source IP address, source port, destination IP address, destination port, and protocol—to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.

If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, Global Accelerator uses the \"two-tuple\" (2-tuple) properties— source (client) IP address and destination IP address—to select the hash value.

The default value is NONE.

" + "documentation":"

Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Client affinity gives you control over whether to always route each client to the same specific endpoint.

AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE, Global Accelerator uses the \"five-tuple\" (5-tuple) properties—source IP address, source port, destination IP address, destination port, and protocol—to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.

If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, Global Accelerator uses the \"two-tuple\" (2-tuple) properties— source (client) IP address and destination IP address—to select the hash value.

The default value is NONE.

" } }, "documentation":"

A complex type for a listener.

" @@ -1311,6 +1319,26 @@ "max":65535, "min":1 }, + "PortOverride":{ + "type":"structure", + "members":{ + "ListenerPort":{ + "shape":"PortNumber", + "documentation":"

The listener port that you want to map to a specific endpoint port. This is the port on which user traffic arrives at Global Accelerator.

" + }, + "EndpointPort":{ + "shape":"PortNumber", + "documentation":"

The endpoint port that you want a listener port to be mapped to. This is the port on the endpoint, such as the Application Load Balancer or Amazon EC2 instance.

" + } + }, + "documentation":"

Override specific listener ports used to route traffic to endpoints that are part of an endpoint group. For example, you can create a port override in which the listener receives user traffic on ports 80 and 443, but your accelerator routes that traffic to ports 1080 and 1443, respectively, on the endpoints.

For more information, see Port overrides in the AWS Global Accelerator Developer Guide.
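
A minimal sketch of the 80/443 to 1080/1443 example above, applied to an existing endpoint group through the generated v2 client (the endpoint group ARN is a placeholder):

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.globalaccelerator.GlobalAcceleratorClient;
import software.amazon.awssdk.services.globalaccelerator.model.PortOverride;
import software.amazon.awssdk.services.globalaccelerator.model.UpdateEndpointGroupRequest;

public class PortOverrideSketch {
    public static void main(String[] args) {
        try (GlobalAcceleratorClient client = GlobalAcceleratorClient.builder()
                .region(Region.US_WEST_2)
                .build()) {
            // Listener ports 80 and 443 are rerouted to endpoint ports 1080 and 1443.
            client.updateEndpointGroup(UpdateEndpointGroupRequest.builder()
                    .endpointGroupArn("arn:aws:globalaccelerator::123456789012:accelerator/example/listener/abcd1234/endpoint-group/example") // placeholder
                    .portOverrides(
                            PortOverride.builder().listenerPort(80).endpointPort(1080).build(),
                            PortOverride.builder().listenerPort(443).endpointPort(1443).build())
                    .build());
        }
    }
}
```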

" + }, + "PortOverrides":{ + "type":"list", + "member":{"shape":"PortOverride"}, + "max":10, + "min":0 + }, "PortRange":{ "type":"structure", "members":{ @@ -1534,7 +1562,7 @@ }, "EndpointConfigurations":{ "shape":"EndpointConfigurations", - "documentation":"

The list of endpoint objects.

" + "documentation":"

The list of endpoint objects. A resource must be valid and active when you add it as an endpoint.

" }, "TrafficDialPercentage":{ "shape":"TrafficDialPercentage", @@ -1559,6 +1587,10 @@ "ThresholdCount":{ "shape":"ThresholdCount", "documentation":"

The number of consecutive health checks required to set the state of a healthy endpoint to unhealthy, or to set an unhealthy endpoint to healthy. The default value is 3.

" + }, + "PortOverrides":{ + "shape":"PortOverrides", + "documentation":"

Override specific listener ports used to route traffic to endpoints that are part of this endpoint group. For example, you can create a port override in which the listener receives user traffic on ports 80 and 443, but your accelerator routes that traffic to ports 1080 and 1443, respectively, on the endpoints.

For more information, see Port overrides in the AWS Global Accelerator Developer Guide.

" } } }, @@ -1589,7 +1621,7 @@ }, "ClientAffinity":{ "shape":"ClientAffinity", - "documentation":"

Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Clienty affinity gives you control over whether to always route each client to the same specific endpoint.

AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE, Global Accelerator uses the \"five-tuple\" (5-tuple) properties—source IP address, source port, destination IP address, destination port, and protocol—to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.

If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, Global Accelerator uses the \"two-tuple\" (2-tuple) properties— source (client) IP address and destination IP address—to select the hash value.

The default value is NONE.

" + "documentation":"

Client affinity lets you direct all requests from a user to the same endpoint, if you have stateful applications, regardless of the port and protocol of the client request. Client affinity gives you control over whether to always route each client to the same specific endpoint.

AWS Global Accelerator uses a consistent-flow hashing algorithm to choose the optimal endpoint for a connection. If client affinity is NONE, Global Accelerator uses the \"five-tuple\" (5-tuple) properties—source IP address, source port, destination IP address, destination port, and protocol—to select the hash value, and then chooses the best endpoint. However, with this setting, if someone uses different ports to connect to Global Accelerator, their connections might not be always routed to the same endpoint because the hash value changes.

If you want a given client to always be routed to the same endpoint, set client affinity to SOURCE_IP instead. When you use the SOURCE_IP setting, Global Accelerator uses the \"two-tuple\" (2-tuple) properties— source (client) IP address and destination IP address—to select the hash value.

The default value is NONE.

" } } }, @@ -1622,5 +1654,5 @@ } } }, - "documentation":"AWS Global Accelerator

This is the AWS Global Accelerator API Reference. This guide is for developers who need detailed information about AWS Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the AWS Global Accelerator Developer Guide.

AWS Global Accelerator is a service in which you create accelerators to improve availability and performance of your applications for local and global users.

You must specify the US West (Oregon) Region to create or update accelerators.

By default, Global Accelerator provides you with static IP addresses that you associate with your accelerator. (Instead of using the IP addresses that Global Accelerator provides, you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring to Global Accelerator.) The static IP addresses are anycast from the AWS edge network and distribute incoming application traffic across multiple endpoint resources in multiple AWS Regions, which increases the availability of your applications. Endpoints can be Network Load Balancers, Application Load Balancers, EC2 instances, or Elastic IP addresses that are located in one AWS Region or multiple Regions.

Global Accelerator uses the AWS global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is directed to only healthy endpoints.

Global Accelerator includes components that work together to help you improve performance and availability for your applications:

Static IP address

By default, AWS Global Accelerator provides you with a set of static IP addresses that are anycast from the AWS edge network and serve as the single fixed entry points for your clients. Or you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring to Global Accelerator (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the AWS Global Accelerator Developer Guide. If you already have load balancers, EC2 instances, or Elastic IP addresses set up for your applications, you can easily add those to Global Accelerator to allow the resources to be accessed by the static IP addresses.

The static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. You can use IAM policies with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Authentication and Access Control in the AWS Global Accelerator Developer Guide.

Accelerator

An accelerator directs traffic to optimal endpoints over the AWS global network to improve availability and performance for your internet applications that have a global audience. Each accelerator includes one or more listeners.

DNS name

Global Accelerator assigns each accelerator a default Domain Name System (DNS) name, similar to a1234567890abcdef.awsglobalaccelerator.com, that points to your Global Accelerator static IP addresses. Depending on the use case, you can use your accelerator's static IP addresses or DNS name to route traffic to your accelerator, or set up DNS records to route traffic using your own custom domain name.

Network zone

A network zone services the static IP addresses for your accelerator from a unique IP subnet. Similar to an AWS Availability Zone, a network zone is an isolated unit with its own set of physical infrastructure. When you configure an accelerator, by default, Global Accelerator allocates two IPv4 addresses for it. If one IP address from a network zone becomes unavailable due to IP address blocking by certain client networks, or network disruptions, then client applications can retry on the healthy static IP address from the other isolated network zone.

Listener

A listener processes inbound connections from clients to Global Accelerator, based on the protocol and port that you configure. Each listener has one or more endpoint groups associated with it, and traffic is forwarded to endpoints in one of the groups. You associate endpoint groups with listeners by specifying the Regions that you want to distribute traffic to. Traffic is distributed to optimal endpoints within the endpoint groups associated with a listener.

Endpoint group

Each endpoint group is associated with a specific AWS Region. Endpoint groups include one or more endpoints in the Region. You can increase or reduce the percentage of traffic that would be otherwise directed to an endpoint group by adjusting a setting called a traffic dial. The traffic dial lets you easily do performance testing or blue/green deployment testing for new releases across different AWS Regions, for example.

Endpoint

An endpoint is a Network Load Balancer, Application Load Balancer, EC2 instance, or Elastic IP address. Traffic is routed to endpoints based on several factors, including the geo-proximity to the user, the health of the endpoint, and the configuration options that you choose, such as endpoint weights. For each endpoint, you can configure weights, which are numbers that you can use to specify the proportion of traffic to route to each one. This can be useful, for example, to do performance testing within a Region.

" + "documentation":"AWS Global Accelerator

This is the AWS Global Accelerator API Reference. This guide is for developers who need detailed information about AWS Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the AWS Global Accelerator Developer Guide.

AWS Global Accelerator is a service in which you create accelerators to improve availability and performance of your applications for local and global users. Global Accelerator directs traffic to optimal endpoints over the AWS global network. This improves the availability and performance of your internet applications that are used by a global audience. Global Accelerator is a global service that supports endpoints in multiple AWS Regions, which are listed in the AWS Region Table.

Global Accelerator is a global service that supports endpoints in multiple AWS Regions but you must specify the US West (Oregon) Region to create or update accelerators.

By default, Global Accelerator provides you with static IP addresses that you associate with your accelerator. (Instead of using the IP addresses that Global Accelerator provides, you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring to Global Accelerator.) The static IP addresses are anycast from the AWS edge network and distribute incoming application traffic across multiple endpoint resources in multiple AWS Regions, which increases the availability of your applications. Endpoints can be Network Load Balancers, Application Load Balancers, EC2 instances, or Elastic IP addresses that are located in one AWS Region or multiple Regions.

Global Accelerator uses the AWS global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is directed to only healthy endpoints.

Global Accelerator includes components that work together to help you improve performance and availability for your applications:

Static IP address

By default, AWS Global Accelerator provides you with a set of static IP addresses that are anycast from the AWS edge network and serve as the single fixed entry points for your clients. Or you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring to Global Accelerator (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the AWS Global Accelerator Developer Guide. If you already have load balancers, EC2 instances, or Elastic IP addresses set up for your applications, you can easily add those to Global Accelerator to allow the resources to be accessed by the static IP addresses.

The static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. You can use IAM policies with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Authentication and Access Control in the AWS Global Accelerator Developer Guide.

Accelerator

An accelerator directs traffic to optimal endpoints over the AWS global network to improve availability and performance for your internet applications that have a global audience. Each accelerator includes one or more listeners.

DNS name

Global Accelerator assigns each accelerator a default Domain Name System (DNS) name, similar to a1234567890abcdef.awsglobalaccelerator.com, that points to your Global Accelerator static IP addresses. Depending on the use case, you can use your accelerator's static IP addresses or DNS name to route traffic to your accelerator, or set up DNS records to route traffic using your own custom domain name.

Network zone

A network zone services the static IP addresses for your accelerator from a unique IP subnet. Similar to an AWS Availability Zone, a network zone is an isolated unit with its own set of physical infrastructure. When you configure an accelerator, by default, Global Accelerator allocates two IPv4 addresses for it. If one IP address from a network zone becomes unavailable due to IP address blocking by certain client networks, or network disruptions, then client applications can retry on the healthy static IP address from the other isolated network zone.

Listener

A listener processes inbound connections from clients to Global Accelerator, based on the protocol and port that you configure. Each listener has one or more endpoint groups associated with it, and traffic is forwarded to endpoints in one of the groups. You associate endpoint groups with listeners by specifying the Regions that you want to distribute traffic to. Traffic is distributed to optimal endpoints within the endpoint groups associated with a listener.

Endpoint group

Each endpoint group is associated with a specific AWS Region. Endpoint groups include one or more endpoints in the Region. You can increase or reduce the percentage of traffic that would be otherwise directed to an endpoint group by adjusting a setting called a traffic dial. The traffic dial lets you easily do performance testing or blue/green deployment testing for new releases across different AWS Regions, for example.

Endpoint

An endpoint is a Network Load Balancer, Application Load Balancer, EC2 instance, or Elastic IP address. Traffic is routed to endpoints based on several factors, including the geo-proximity to the user, the health of the endpoint, and the configuration options that you choose, such as endpoint weights. For each endpoint, you can configure weights, which are numbers that you can use to specify the proportion of traffic to route to each one. This can be useful, for example, to do performance testing within a Region.

" } diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 556d2876538c..73603dff6e04 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/paginators-1.json b/services/glue/src/main/resources/codegen-resources/paginators-1.json index b905b5802f99..f3a2874c231c 100644 --- a/services/glue/src/main/resources/codegen-resources/paginators-1.json +++ b/services/glue/src/main/resources/codegen-resources/paginators-1.json @@ -60,6 +60,12 @@ "limit_key": "MaxResults", "output_token": "NextToken" }, + "GetResourcePolicies": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "GetResourcePoliciesResponseList" + }, "GetSecurityConfigurations": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -111,6 +117,24 @@ "limit_key": "MaxResults", "output_token": "NextToken" }, + "ListRegistries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Registries" + }, + "ListSchemaVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Schemas" + }, + "ListSchemas": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Schemas" + }, "ListTriggers": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 0f3688d93248..d55787e83e1f 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -233,6 +233,21 @@ ], "documentation":"

Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run's parent transform's TransformID and the task run's TaskRunId.

" }, + "CheckSchemaVersionValidity":{ + "name":"CheckSchemaVersionValidity", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckSchemaVersionValidityInput"}, + "output":{"shape":"CheckSchemaVersionValidityResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Validates the supplied schema. This call has no side effects; it simply validates the supplied schema definition, using DataFormat as the format. Since it does not take a schema set name, no compatibility checks are performed.
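
A minimal sketch with the generated v2 Glue client; the Avro record below is a placeholder schema used only to show the call shape:

```java
import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.CheckSchemaVersionValidityRequest;
import software.amazon.awssdk.services.glue.model.CheckSchemaVersionValidityResponse;
import software.amazon.awssdk.services.glue.model.DataFormat;

public class SchemaValiditySketch {
    public static void main(String[] args) {
        // A trivial Avro record schema, used only for illustration.
        String avroSchema = "{\"type\":\"record\",\"name\":\"Example\","
                + "\"fields\":[{\"name\":\"id\",\"type\":\"string\"}]}";

        try (GlueClient glue = GlueClient.create()) {
            CheckSchemaVersionValidityResponse response = glue.checkSchemaVersionValidity(
                    CheckSchemaVersionValidityRequest.builder()
                            .dataFormat(DataFormat.AVRO)
                            .schemaDefinition(avroSchema)
                            .build());
            System.out.println("Valid: " + response.valid());
        }
    }
}
```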

" + }, "CreateClassifier":{ "name":"CreateClassifier", "http":{ @@ -376,6 +391,60 @@ ], "documentation":"

Creates a new partition.

" }, + "CreatePartitionIndex":{ + "name":"CreatePartitionIndex", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePartitionIndexRequest"}, + "output":{"shape":"CreatePartitionIndexResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Creates a specified partition index in an existing table.
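
A hedged sketch with the generated v2 client; the database, table, index name, and key columns are placeholders, and the keys are assumed to be existing partition keys of the table:

```java
import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.CreatePartitionIndexRequest;
import software.amazon.awssdk.services.glue.model.PartitionIndex;

public class PartitionIndexSketch {
    public static void main(String[] args) {
        try (GlueClient glue = GlueClient.create()) {
            glue.createPartitionIndex(CreatePartitionIndexRequest.builder()
                    .databaseName("sales_db")              // placeholder database
                    .tableName("orders")                   // placeholder table
                    .partitionIndex(PartitionIndex.builder()
                            .indexName("year_month_index") // placeholder index name
                            .keys("year", "month")         // assumed existing partition keys
                            .build())
                    .build());
        }
    }
}
```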

" + }, + "CreateRegistry":{ + "name":"CreateRegistry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRegistryInput"}, + "output":{"shape":"CreateRegistryResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates a new registry which may be used to hold a collection of schemas.

" + }, + "CreateSchema":{ + "name":"CreateSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSchemaInput"}, + "output":{"shape":"CreateSchemaResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates a new schema set and registers the schema definition. If the schema set already exists, an error is returned and the version is not registered.

When the schema set is created, a version checkpoint will be set to the first version. Compatibility mode \"DISABLED\" restricts any additional schema versions from being added after the first schema version. For all other compatibility modes, validation of compatibility settings will be applied only from the second version onwards when the RegisterSchemaVersion API is used.

When this API is called without a RegistryId, this will create an entry for a \"default-registry\" in the registry database tables, if it is not already present.
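
A minimal sketch of registering a first schema version with the generated v2 client; the registry name, schema name, and Avro definition are placeholders, and omitting registryId would fall back to the default registry described above:

```java
import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.Compatibility;
import software.amazon.awssdk.services.glue.model.CreateSchemaRequest;
import software.amazon.awssdk.services.glue.model.CreateSchemaResponse;
import software.amazon.awssdk.services.glue.model.DataFormat;
import software.amazon.awssdk.services.glue.model.RegistryId;

public class CreateSchemaSketch {
    public static void main(String[] args) {
        String avroSchema = "{\"type\":\"record\",\"name\":\"Order\","
                + "\"fields\":[{\"name\":\"orderId\",\"type\":\"string\"}]}";

        try (GlueClient glue = GlueClient.create()) {
            // Leaving registryId unset would register the schema in the default registry.
            CreateSchemaResponse response = glue.createSchema(CreateSchemaRequest.builder()
                    .registryId(RegistryId.builder().registryName("orders-registry").build()) // placeholder registry
                    .schemaName("order-events")                                               // placeholder schema set name
                    .dataFormat(DataFormat.AVRO)
                    .compatibility(Compatibility.BACKWARD)
                    .schemaDefinition(avroSchema)
                    .build());
            System.out.println(response.schemaArn() + " v" + response.latestSchemaVersion());
        }
    }
}
```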

" + }, "CreateScript":{ "name":"CreateScript", "http":{ @@ -513,7 +582,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"GlueEncryptionException"} ], - "documentation":"

Delete the partition column statistics of a column.

" + "documentation":"

Deletes the partition column statistics of a column.

The Identity and Access Management (IAM) permission required for this operation is DeletePartition.

" }, "DeleteColumnStatisticsForTable":{ "name":"DeleteColumnStatisticsForTable", @@ -530,7 +599,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"GlueEncryptionException"} ], - "documentation":"

Retrieves table statistics of columns.

" + "documentation":"

Deletes table statistics of columns.

The Identity and Access Management (IAM) permission required for this operation is DeleteTable.

" }, "DeleteConnection":{ "name":"DeleteConnection", @@ -641,6 +710,40 @@ ], "documentation":"

Deletes a specified partition.

" }, + "DeletePartitionIndex":{ + "name":"DeletePartitionIndex", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePartitionIndexRequest"}, + "output":{"shape":"DeletePartitionIndexResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Deletes a specified partition index from an existing table.

" + }, + "DeleteRegistry":{ + "name":"DeleteRegistry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRegistryInput"}, + "output":{"shape":"DeleteRegistryResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Deletes the entire registry, including its schemas and all of their versions. To get the status of the delete operation, you can call the GetRegistry API after the asynchronous call. Deleting a registry will disable all online operations for the registry, such as the UpdateRegistry, CreateSchema, UpdateSchema, and RegisterSchemaVersion APIs.

" + }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", "http":{ @@ -658,6 +761,38 @@ ], "documentation":"

Deletes a specified policy.

" }, + "DeleteSchema":{ + "name":"DeleteSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSchemaInput"}, + "output":{"shape":"DeleteSchemaResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Deletes the entire schema set, including all of its versions. To get the status of the delete operation, you can call the GetSchema API after the asynchronous call. Deleting a schema will disable all online operations for the schema, such as the GetSchemaByDefinition and RegisterSchemaVersion APIs.

" + }, + "DeleteSchemaVersions":{ + "name":"DeleteSchemaVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSchemaVersionsInput"}, + "output":{"shape":"DeleteSchemaVersionsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Removes versions from the specified schema. A version number or range may be supplied. If the compatibility mode forbids deleting a version that is necessary, such as BACKWARDS_FULL, an error is returned. Calling the GetSchemaVersions API after this call will list the status of the deleted versions.

When the range of version numbers contains a checkpointed version, the API returns a 409 conflict and does not proceed with the deletion. You have to remove the checkpoint first, using the DeleteSchemaCheckpoint API, before using this API.

You cannot use the DeleteSchemaVersions API to delete the first schema version in the schema set. The first schema version can only be deleted by the DeleteSchema API. This operation will also delete the attached SchemaVersionMetadata under the schema versions. Hard deletes will be enforced on the database.

If the compatibility mode forbids deleting a version that is necessary, such as BACKWARDS_FULL, an error is returned.
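
A hedged sketch with the generated v2 client; the registry and schema names are placeholders, and the "2-4" range string is an assumed example of the version-range syntax:

```java
import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.DeleteSchemaVersionsRequest;
import software.amazon.awssdk.services.glue.model.SchemaId;

public class DeleteVersionsSketch {
    public static void main(String[] args) {
        try (GlueClient glue = GlueClient.create()) {
            glue.deleteSchemaVersions(DeleteSchemaVersionsRequest.builder()
                    .schemaId(SchemaId.builder()
                            .registryName("orders-registry")  // placeholder registry
                            .schemaName("order-events")       // placeholder schema set
                            .build())
                    .versions("2-4")                          // assumed "N" or "N-M" range syntax
                    .build());
        }
    }
}
```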

" + }, "DeleteSecurityConfiguration":{ "name":"DeleteSecurityConfiguration", "http":{ @@ -810,7 +945,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"GlueEncryptionException"} ], - "documentation":"

Retrieves partition statistics of columns.

" + "documentation":"

Retrieves partition statistics of columns.

The Identity and Access Management (IAM) permission required for this operation is GetPartition.

" }, "GetColumnStatisticsForTable":{ "name":"GetColumnStatisticsForTable", @@ -827,7 +962,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"GlueEncryptionException"} ], - "documentation":"

Retrieves table statistics of columns.

" + "documentation":"

Retrieves table statistics of columns.

The Identity and Access Management (IAM) permission required for this operation is GetTable.

" }, "GetConnection":{ "name":"GetConnection", @@ -1223,6 +1358,22 @@ ], "documentation":"

Gets code to perform a specified mapping.

" }, + "GetRegistry":{ + "name":"GetRegistry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRegistryInput"}, + "output":{"shape":"GetRegistryResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Describes the specified registry in detail.

" + }, "GetResourcePolicies":{ "name":"GetResourcePolicies", "http":{ @@ -1255,6 +1406,70 @@ ], "documentation":"

Retrieves a specified resource policy.

" }, + "GetSchema":{ + "name":"GetSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSchemaInput"}, + "output":{"shape":"GetSchemaResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Describes the specified schema in detail.

" + }, + "GetSchemaByDefinition":{ + "name":"GetSchemaByDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSchemaByDefinitionInput"}, + "output":{"shape":"GetSchemaByDefinitionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Retrieves a schema by the SchemaDefinition. The schema definition is sent to the Schema Registry, canonicalized, and hashed. If the hash is matched within the scope of the SchemaName or ARN (or the default registry, if none is supplied), that schema’s metadata is returned. Otherwise, a 404 or NotFound error is returned. Schema versions in Deleted statuses will not be included in the results.

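As a rough, non-authoritative sketch assuming the Java v2 client generated from this model (the registry name, schema name, and definition string are placeholders):

    import software.amazon.awssdk.services.glue.GlueClient;
    import software.amazon.awssdk.services.glue.model.*;

    public class LookupByDefinitionSketch {
        public static void main(String[] args) {
            String avroDefinition = "{\"type\":\"record\",\"name\":\"r\",\"fields\":[]}"; // placeholder AVRO schema
            try (GlueClient glue = GlueClient.create()) {
                GetSchemaByDefinitionResponse found = glue.getSchemaByDefinition(
                    GetSchemaByDefinitionRequest.builder()
                        .schemaId(SchemaId.builder()
                            .registryName("my-registry")
                            .schemaName("my-schema")
                            .build())
                        .schemaDefinition(avroDefinition)
                        .build());
                // The service canonicalizes and hashes the definition before matching.
                System.out.println(found.schemaVersionId() + " " + found.status());
            }
        }
    }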
" + }, + "GetSchemaVersion":{ + "name":"GetSchemaVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSchemaVersionInput"}, + "output":{"shape":"GetSchemaVersionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Gets the specified schema by the unique ID assigned when a version of the schema is created or registered. Schema versions in Deleted status will not be included in the results.

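Assuming the generated Java v2 client, fetching a version by its ID could look like this sketch (the version ID is a placeholder):

    import software.amazon.awssdk.services.glue.GlueClient;
    import software.amazon.awssdk.services.glue.model.*;

    public class GetVersionSketch {
        public static void main(String[] args) {
            try (GlueClient glue = GlueClient.create()) {
                GetSchemaVersionResponse version = glue.getSchemaVersion(
                    GetSchemaVersionRequest.builder()
                        // ID returned by CreateSchema or RegisterSchemaVersion (placeholder value)
                        .schemaVersionId("11111111-2222-3333-4444-555555555555")
                        .build());
                System.out.println(version.versionNumber() + " -> " + version.status());
            }
        }
    }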
" + }, + "GetSchemaVersionsDiff":{ + "name":"GetSchemaVersionsDiff", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSchemaVersionsDiffInput"}, + "output":{"shape":"GetSchemaVersionsDiffResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Fetches the schema version difference in the specified difference type between two stored schema versions in the Schema Registry.

This API allows you to compare two schema versions under the same schema.

" + }, "GetSecurityConfiguration":{ "name":"GetSecurityConfiguration", "http":{ @@ -1576,6 +1791,53 @@ ], "documentation":"

Retrieves a sortable, filterable list of existing AWS Glue machine learning transforms in this AWS account, or the resources with the specified tag. This operation takes the optional Tags field, which you can use as a filter of the responses so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tags are retrieved.

" }, + "ListRegistries":{ + "name":"ListRegistries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRegistriesInput"}, + "output":{"shape":"ListRegistriesResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a list of registries that you have created, with minimal registry information. Registries in the Deleting status will not be included in the results. Empty results will be returned if there are no registries available.

" + }, + "ListSchemaVersions":{ + "name":"ListSchemaVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSchemaVersionsInput"}, + "output":{"shape":"ListSchemaVersionsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a list of schema versions that you have created, with minimal information. Schema versions in Deleted status will not be included in the results. Empty results will be returned if there are no schema versions available.

" + }, + "ListSchemas":{ + "name":"ListSchemas", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSchemasInput"}, + "output":{"shape":"ListSchemasResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Returns a list of schemas with minimal details. Schemas in Deleting status will not be included in the results. Empty results will be returned if there are no schemas available.

When the RegistryId is not provided, all the schemas across registries will be part of the API response.

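A minimal manual-pagination sketch, assuming the Java v2 client generated from this model (the registry name is a placeholder; a generated paginator, if available, would work equally well):

    import software.amazon.awssdk.services.glue.GlueClient;
    import software.amazon.awssdk.services.glue.model.*;

    public class ListSchemasSketch {
        public static void main(String[] args) {
            try (GlueClient glue = GlueClient.create()) {
                String token = null;
                do {
                    ListSchemasResponse page = glue.listSchemas(ListSchemasRequest.builder()
                        .registryId(RegistryId.builder().registryName("my-registry").build())
                        .maxResults(25)          // the service default page size
                        .nextToken(token)
                        .build());
                    page.schemas().forEach(System.out::println);
                    token = page.nextToken();
                } while (token != null);
            }
        }
    }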
" + }, "ListTriggers":{ "name":"ListTriggers", "http":{ @@ -1639,6 +1901,23 @@ ], "documentation":"

Sets the Data Catalog resource policy for access control.

" }, + "PutSchemaVersionMetadata":{ + "name":"PutSchemaVersionMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutSchemaVersionMetadataInput"}, + "output":{"shape":"PutSchemaVersionMetadataResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ResourceNumberLimitExceededException"} + ], + "documentation":"

Puts the metadata key-value pair for a specified schema version ID. A maximum of 10 key-value pairs are allowed per schema version. They can be added over one or more calls.

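A minimal sketch with the generated Java v2 client; the version ID and the key-value pair are placeholders:

    import software.amazon.awssdk.services.glue.GlueClient;
    import software.amazon.awssdk.services.glue.model.*;

    public class PutMetadataSketch {
        public static void main(String[] args) {
            try (GlueClient glue = GlueClient.create()) {
                glue.putSchemaVersionMetadata(PutSchemaVersionMetadataRequest.builder()
                    .schemaVersionId("11111111-2222-3333-4444-555555555555") // placeholder version ID
                    .metadataKeyValue(MetadataKeyValuePair.builder()
                        .metadataKey("deployed-by")   // must match the MetadataKeyString pattern
                        .metadataValue("team-a")
                        .build())
                    .build());
            }
        }
    }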
" + }, "PutWorkflowRunProperties":{ "name":"PutWorkflowRunProperties", "http":{ @@ -1658,6 +1937,54 @@ ], "documentation":"

Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, it overrides the value; otherwise, it adds the property to the existing properties.

" }, + "QuerySchemaVersionMetadata":{ + "name":"QuerySchemaVersionMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"QuerySchemaVersionMetadataInput"}, + "output":{"shape":"QuerySchemaVersionMetadataResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Queries for the schema version metadata information.

" + }, + "RegisterSchemaVersion":{ + "name":"RegisterSchemaVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterSchemaVersionInput"}, + "output":{"shape":"RegisterSchemaVersionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Adds a new version to the existing schema. Returns an error if the new version of the schema does not meet the compatibility requirements of the schema set. This API will not create a new schema set and will return a 404 error if the schema set is not already present in the Schema Registry.

If this is the first schema definition to be registered in the Schema Registry, this API will store the schema version and return immediately. Otherwise, this call has the potential to run longer than other operations due to compatibility modes. You can call the GetSchemaVersion API with the SchemaVersionId to check compatibility modes.

If the same schema definition is already stored in Schema Registry as a version, the schema ID of the existing schema is returned to the caller.

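A minimal sketch assuming the generated Java v2 client; the registry name, schema name, and definition are placeholders:

    import software.amazon.awssdk.services.glue.GlueClient;
    import software.amazon.awssdk.services.glue.model.*;

    public class RegisterVersionSketch {
        public static void main(String[] args) {
            String newDefinition = "{\"type\":\"record\",\"name\":\"r\",\"fields\":[]}"; // placeholder AVRO schema
            try (GlueClient glue = GlueClient.create()) {
                RegisterSchemaVersionResponse registered = glue.registerSchemaVersion(
                    RegisterSchemaVersionRequest.builder()
                        .schemaId(SchemaId.builder()
                            .registryName("my-registry")
                            .schemaName("my-schema")
                            .build())
                        .schemaDefinition(newDefinition)
                        .build());
                // Poll GetSchemaVersion with the returned ID until the compatibility check completes.
                System.out.println(registered.schemaVersionId() + " -> " + registered.status());
            }
        }
    }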
" + }, + "RemoveSchemaVersionMetadata":{ + "name":"RemoveSchemaVersionMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveSchemaVersionMetadataInput"}, + "output":{"shape":"RemoveSchemaVersionMetadataResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Removes a key value pair from the schema version metadata for the specified schema version ID.

" + }, "ResetJobBookmark":{ "name":"ResetJobBookmark", "http":{ @@ -1990,7 +2317,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"GlueEncryptionException"} ], - "documentation":"

Creates or updates partition statistics of columns.

" + "documentation":"

Creates or updates partition statistics of columns.

The Identity and Access Management (IAM) permission required for this operation is UpdatePartition.

" }, "UpdateColumnStatisticsForTable":{ "name":"UpdateColumnStatisticsForTable", @@ -2007,7 +2334,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"GlueEncryptionException"} ], - "documentation":"

Creates or updates table statistics of columns.

" + "documentation":"

Creates or updates table statistics of columns.

The Identity and Access Management (IAM) permission required for this operation is UpdateTable.

" }, "UpdateConnection":{ "name":"UpdateConnection", @@ -2145,6 +2472,40 @@ ], "documentation":"

Updates a partition.

" }, + "UpdateRegistry":{ + "name":"UpdateRegistry", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRegistryInput"}, + "output":{"shape":"UpdateRegistryResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Updates an existing registry, which is used to hold a collection of schemas. The updated properties relate to the registry, and do not modify any of the schemas within the registry.

" + }, + "UpdateSchema":{ + "name":"UpdateSchema", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSchemaInput"}, + "output":{"shape":"UpdateSchemaResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Updates the description, compatibility setting, or version checkpoint for a schema set.

For updating the compatibility setting, the call will not validate compatibility for the entire set of schema versions with the new compatibility setting. If the value for Compatibility is provided, the VersionNumber (a checkpoint) is also required. The API will validate the checkpoint version number for consistency.

If the value for the VersionNumber (checkpoint) is provided, Compatibility is optional and this can be used to set/reset a checkpoint for the schema.

This update will happen only if the schema is in the AVAILABLE state.

" + }, "UpdateTable":{ "name":"UpdateTable", "http":{ @@ -2279,6 +2640,38 @@ "exception":true }, "AttemptCount":{"type":"integer"}, + "BackfillError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"BackfillErrorCode", + "documentation":"

The error code for an error that occurred when registering partition indexes for an existing table.

" + }, + "Partitions":{ + "shape":"BackfillErroredPartitionsList", + "documentation":"

A list of a limited number of partitions in the response.

" + } + }, + "documentation":"

An error that can occur when registering partition indexes for an existing table.

These errors give the details about why an index registration failed and provide a limited number of partitions in the response, so that you can fix the partitions at fault and try registering the index again. The most common errors are categorized as follows:

  • EncryptedPartitionError: The partitions are encrypted.

  • InvalidPartitionTypeDataError: The partition value doesn't match the data type for that partition column.

  • MissingPartitionValueError: The partition value is missing.

  • UnsupportedPartitionCharacterError: Characters inside the partition value are not supported. For example: U+0000, U+0001, U+0002.

  • InternalError: Any error which does not belong to other error codes.

" + }, + "BackfillErrorCode":{ + "type":"string", + "enum":[ + "ENCRYPTED_PARTITION_ERROR", + "INTERNAL_ERROR", + "INVALID_PARTITION_TYPE_DATA_ERROR", + "MISSING_PARTITION_VALUE_ERROR", + "UNSUPPORTED_PARTITION_CHARACTER_ERROR" + ] + }, + "BackfillErroredPartitionsList":{ + "type":"list", + "member":{"shape":"PartitionValueList"} + }, + "BackfillErrors":{ + "type":"list", + "member":{"shape":"BackfillError"} + }, "BatchCreatePartitionRequest":{ "type":"structure", "required":[ @@ -2786,18 +3179,18 @@ "members":{ "MaximumLength":{ "shape":"NonNegativeLong", - "documentation":"

Maximum length of the column.

" + "documentation":"

The size of the longest bit sequence in the column.

" }, "AverageLength":{ "shape":"NonNegativeDouble", - "documentation":"

Average length of the column.

" + "documentation":"

The average bit sequence length in the column.

" }, "NumberOfNulls":{ "shape":"NonNegativeLong", - "documentation":"

Number of nulls.

" + "documentation":"

The number of null values in the column.

" } }, - "documentation":"

Defines a binary column statistics data.

" + "documentation":"

Defines column statistics supported for bit sequence data values.

" }, "Blob":{"type":"blob"}, "Boolean":{"type":"boolean"}, @@ -2811,18 +3204,18 @@ "members":{ "NumberOfTrues":{ "shape":"NonNegativeLong", - "documentation":"

Number of true value.

" + "documentation":"

The number of true values in the column.

" }, "NumberOfFalses":{ "shape":"NonNegativeLong", - "documentation":"

Number of false value.

" + "documentation":"

The number of false values in the column.

" }, "NumberOfNulls":{ "shape":"NonNegativeLong", - "documentation":"

Number of nulls.

" + "documentation":"

The number of null values in the column.

" } }, - "documentation":"

Defines a boolean column statistics.

" + "documentation":"

Defines column statistics supported for Boolean data columns.

" }, "BooleanNullable":{"type":"boolean"}, "BooleanValue":{"type":"boolean"}, @@ -2946,6 +3339,36 @@ "type":"list", "member":{"shape":"CatalogTarget"} }, + "CheckSchemaVersionValidityInput":{ + "type":"structure", + "required":[ + "DataFormat", + "SchemaDefinition" + ], + "members":{ + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

The data format of the schema definition. Currently only AVRO is supported.

" + }, + "SchemaDefinition":{ + "shape":"SchemaDefinitionString", + "documentation":"

The definition of the schema that has to be validated.

" + } + } + }, + "CheckSchemaVersionValidityResponse":{ + "type":"structure", + "members":{ + "Valid":{ + "shape":"IsVersionValid", + "documentation":"

Returns true if the schema is valid; otherwise, returns false.

" + }, + "Error":{ + "shape":"SchemaValidationError", + "documentation":"

A validation failure error message.

" + } + } + }, "Classification":{"type":"string"}, "Classifier":{ "type":"structure", @@ -3112,14 +3535,14 @@ "members":{ "ColumnName":{ "shape":"NameString", - "documentation":"

The name of the column.

" + "documentation":"

The name of the column that failed.

" }, "Error":{ "shape":"ErrorDetail", - "documentation":"

The error message occurred during operation.

" + "documentation":"

An error message with the reason for the failure of an operation.

" } }, - "documentation":"

Defines a column containing error.

" + "documentation":"

Encapsulates a column name that failed and the reason for failure.

" }, "ColumnErrors":{ "type":"list", @@ -3146,22 +3569,22 @@ "members":{ "ColumnName":{ "shape":"NameString", - "documentation":"

The name of the column.

" + "documentation":"

The name of the column that the statistics belong to.

" }, "ColumnType":{ "shape":"TypeString", - "documentation":"

The type of the column.

" + "documentation":"

The data type of the column.

" }, "AnalyzedTime":{ "shape":"Timestamp", - "documentation":"

The analyzed time of the column statistics.

" + "documentation":"

The timestamp of when column statistics were generated.

" }, "StatisticsData":{ "shape":"ColumnStatisticsData", - "documentation":"

The statistics of the column.

" + "documentation":"

A ColumnStatisticsData object that contains the statistics data values.

" } }, - "documentation":"

Defines a column statistics.

" + "documentation":"

Represents the generated column-level statistics for a table or partition.

" }, "ColumnStatisticsData":{ "type":"structure", @@ -3169,52 +3592,52 @@ "members":{ "Type":{ "shape":"ColumnStatisticsType", - "documentation":"

The name of the column.

" + "documentation":"

The type of column statistics data.

" }, "BooleanColumnStatisticsData":{ "shape":"BooleanColumnStatisticsData", - "documentation":"

Boolean Column Statistics Data.

" + "documentation":"

Boolean column statistics data.

" }, "DateColumnStatisticsData":{ "shape":"DateColumnStatisticsData", - "documentation":"

Date Column Statistics Data.

" + "documentation":"

Date column statistics data.

" }, "DecimalColumnStatisticsData":{ "shape":"DecimalColumnStatisticsData", - "documentation":"

Decimal Column Statistics Data.

" + "documentation":"

Decimal column statistics data.

" }, "DoubleColumnStatisticsData":{ "shape":"DoubleColumnStatisticsData", - "documentation":"

Double Column Statistics Data.

" + "documentation":"

Double column statistics data.

" }, "LongColumnStatisticsData":{ "shape":"LongColumnStatisticsData", - "documentation":"

Long Column Statistics Data.

" + "documentation":"

Long column statistics data.

" }, "StringColumnStatisticsData":{ "shape":"StringColumnStatisticsData", - "documentation":"

String Column Statistics Data.

" + "documentation":"

String column statistics data.

" }, "BinaryColumnStatisticsData":{ "shape":"BinaryColumnStatisticsData", - "documentation":"

Binary Column Statistics Data.

" + "documentation":"

Binary column statistics data.

" } }, - "documentation":"

Defines a column statistics data.

" + "documentation":"

Contains the individual types of column statistics data. Only one data object should be set and indicated by the Type attribute.

" }, "ColumnStatisticsError":{ "type":"structure", "members":{ "ColumnStatistics":{ "shape":"ColumnStatistics", - "documentation":"

The ColumnStatistics of the column.

" + "documentation":"

The ColumnStatistics of the column.

" }, "Error":{ "shape":"ErrorDetail", - "documentation":"

The error message occurred during operation.

" + "documentation":"

An error message with the reason for the failure of an operation.

" } }, - "documentation":"

Defines a column containing error.

" + "documentation":"

Encapsulates a ColumnStatistics object that failed and the reason for failure.

" }, "ColumnStatisticsErrors":{ "type":"list", @@ -3263,6 +3686,19 @@ "LESS_THAN_EQUALS" ] }, + "Compatibility":{ + "type":"string", + "enum":[ + "NONE", + "DISABLED", + "BACKWARD", + "BACKWARD_ALL", + "FORWARD", + "FORWARD_ALL", + "FULL", + "FULL_ALL" + ] + }, "ConcurrentModificationException":{ "type":"structure", "members":{ @@ -3580,10 +4016,18 @@ "shape":"ClassifierNameList", "documentation":"

A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.

" }, + "RecrawlPolicy":{ + "shape":"RecrawlPolicy", + "documentation":"

A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.

" + }, "SchemaChangePolicy":{ "shape":"SchemaChangePolicy", "documentation":"

The policy that specifies update and delete behaviors for the crawler.

" }, + "LineageConfiguration":{ + "shape":"LineageConfiguration", + "documentation":"

A configuration that specifies whether data lineage is enabled for the crawler.

" + }, "State":{ "shape":"CrawlerState", "documentation":"

Indicates whether the crawler is running, or whether a run is pending.

" @@ -3628,6 +4072,13 @@ "documentation":"

Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.

" }, "CrawlerConfiguration":{"type":"string"}, + "CrawlerLineageSettings":{ + "type":"string", + "enum":[ + "ENABLE", + "DISABLE" + ] + }, "CrawlerList":{ "type":"list", "member":{"shape":"Crawler"} @@ -3851,6 +4302,14 @@ "shape":"SchemaChangePolicy", "documentation":"

The policy for the crawler's update and deletion behavior.

" }, + "RecrawlPolicy":{ + "shape":"RecrawlPolicy", + "documentation":"

A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.

" + }, + "LineageConfiguration":{ + "shape":"LineageConfiguration", + "documentation":"

Specifies data lineage configuration settings for the crawler.

" + }, "Configuration":{ "shape":"CrawlerConfiguration", "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" @@ -4272,6 +4731,10 @@ "Tags":{ "shape":"TagsMap", "documentation":"

The tags to use with this machine learning transform. You may use tags to limit access to the machine learning transform. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

" + }, + "TransformEncryption":{ + "shape":"TransformEncryption", + "documentation":"

The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.

" } } }, @@ -4284,6 +4747,37 @@ } } }, + "CreatePartitionIndexRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionIndex" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The catalog ID where the table resides.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

Specifies the name of a database in which you want to create a partition index.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

Specifies the name of a table in which you want to create a partition index.

" + }, + "PartitionIndex":{ + "shape":"PartitionIndex", + "documentation":"

Specifies a PartitionIndex structure to create a partition index in an existing table.

" + } + } + }, + "CreatePartitionIndexResponse":{ + "type":"structure", + "members":{ + } + }, "CreatePartitionRequest":{ "type":"structure", "required":[ @@ -4315,6 +4809,143 @@ "members":{ } }, + "CreateRegistryInput":{ + "type":"structure", + "required":["RegistryName"], + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry to be created, with a maximum length of 255 characters. It may only contain letters, numbers, hyphens, underscores, dollar signs, or hash marks. No whitespace is allowed.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the registry. If a description is not provided, no default value is used.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

AWS tags that contain a key value pair and may be searched by console, command line, or API.

" + } + } + }, + "CreateRegistryResponse":{ + "type":"structure", + "members":{ + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the newly created registry.

" + }, + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the registry.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

The tags for the registry.

" + } + } + }, + "CreateSchemaInput":{ + "type":"structure", + "required":[ + "SchemaName", + "DataFormat" + ], + "members":{ + "RegistryId":{ + "shape":"RegistryId", + "documentation":"

This is a wrapper shape that contains the registry identity fields. If this is not provided, the default registry is used. The ARN format of the default registry is: arn:aws:glue:us-east-2:<customer id>:registry/default-registry:random-5-letter-id.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the schema to be created, with a maximum length of 255 characters. It may only contain letters, numbers, hyphens, underscores, dollar signs, or hash marks. No whitespace is allowed.

" + }, + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

The data format of the schema definition. Currently only AVRO is supported.

" + }, + "Compatibility":{ + "shape":"Compatibility", + "documentation":"

The compatibility mode of the schema (a brief creation sketch follows this list). The possible values are:

  • NONE: No compatibility mode applies. You can use this choice in development scenarios or if you do not know the compatibility mode that you want to apply to schemas. Any new version added will be accepted without undergoing a compatibility check.

  • DISABLED: This compatibility choice prevents versioning for a particular schema. You can use this choice to prevent future versioning of a schema.

  • BACKWARD: This compatibility choice is recommended as it allows data receivers to read both the current and one previous schema version. This means that, for instance, a new schema version cannot drop data fields or change the type of those fields, because readers using the previous version would no longer be able to read the data.

  • BACKWARD_ALL: This compatibility choice allows data receivers to read both the current and all previous schema versions. You can use this choice when you need to delete fields or add optional fields, and check compatibility against all previous schema versions.

  • FORWARD: This compatibility choice allows data receivers to read both the current and one next schema version, but not necessarily later versions. You can use this choice when you need to add fields or delete optional fields, but only check compatibility against the last schema version.

  • FORWARD_ALL: This compatibility choice allows data receivers to read data written by producers of any new registered schema. You can use this choice when you need to add fields or delete optional fields, and check compatibility against all previous schema versions.

  • FULL: This compatibility choice allows data receivers to read data written by producers using the previous or next version of the schema, but not necessarily earlier or later versions. You can use this choice when you need to add or remove optional fields, but only check compatibility against the last schema version.

  • FULL_ALL: This compatibility choice allows data receivers to read data written by producers using all previous schema versions. You can use this choice when you need to add or remove optional fields, and check compatibility against all previous schema versions.

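As a usage illustration only (the registry name, schema name, and definition are placeholders, and the client is assumed to be generated from this model), a schema with BACKWARD compatibility could be created like this:

    import software.amazon.awssdk.services.glue.GlueClient;
    import software.amazon.awssdk.services.glue.model.*;

    public class CreateSchemaSketch {
        public static void main(String[] args) {
            String definition = "{\"type\":\"record\",\"name\":\"r\",\"fields\":[]}"; // placeholder AVRO schema
            try (GlueClient glue = GlueClient.create()) {
                CreateSchemaResponse created = glue.createSchema(CreateSchemaRequest.builder()
                    .registryId(RegistryId.builder().registryName("my-registry").build())
                    .schemaName("my-schema")
                    .dataFormat(DataFormat.AVRO)
                    .compatibility(Compatibility.BACKWARD) // new versions stay readable by previous-version readers
                    .schemaDefinition(definition)
                    .build());
                System.out.println(created.schemaArn() + " " + created.schemaVersionStatus());
            }
        }
    }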
" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

An optional description of the schema. If a description is not provided, no default value is used.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

AWS tags that contain a key value pair and may be searched by console, command line, or API. If specified, follows the AWS tags-on-create pattern.

" + }, + "SchemaDefinition":{ + "shape":"SchemaDefinitionString", + "documentation":"

The schema definition using the DataFormat setting for SchemaName.

" + } + } + }, + "CreateSchemaResponse":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry.

" + }, + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the registry.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the schema.

" + }, + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the schema if specified when created.

" + }, + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

The data format of the schema definition. Currently only AVRO is supported.

" + }, + "Compatibility":{ + "shape":"Compatibility", + "documentation":"

The schema compatibility mode.

" + }, + "SchemaCheckpoint":{ + "shape":"SchemaCheckpointNumber", + "documentation":"

The version number of the checkpoint (the last time the compatibility mode was changed).

" + }, + "LatestSchemaVersion":{ + "shape":"VersionLongNumber", + "documentation":"

The latest version of the schema associated with the returned schema definition.

" + }, + "NextSchemaVersion":{ + "shape":"VersionLongNumber", + "documentation":"

The next version of the schema associated with the returned schema definition.

" + }, + "SchemaStatus":{ + "shape":"SchemaStatus", + "documentation":"

The status of the schema.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

The tags for the schema.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique identifier of the first schema version.

" + }, + "SchemaVersionStatus":{ + "shape":"SchemaVersionStatus", + "documentation":"

The status of the first schema version created.

" + } + } + }, "CreateScriptRequest":{ "type":"structure", "members":{ @@ -4543,6 +5174,7 @@ }, "documentation":"

Specifies an XML classifier for CreateClassifier to create.

" }, + "CreatedTimestamp":{"type":"string"}, "CronExpression":{"type":"string"}, "CsvClassifier":{ "type":"structure", @@ -4643,6 +5275,10 @@ }, "documentation":"

Contains configuration information for maintaining Data Catalog security.

" }, + "DataFormat":{ + "type":"string", + "enum":["AVRO"] + }, "DataLakePrincipal":{ "type":"structure", "members":{ @@ -4756,22 +5392,22 @@ "members":{ "MinimumValue":{ "shape":"Timestamp", - "documentation":"

Minimum value of the column.

" + "documentation":"

The lowest value in the column.

" }, "MaximumValue":{ "shape":"Timestamp", - "documentation":"

Maximum value of the column.

" + "documentation":"

The highest value in the column.

" }, "NumberOfNulls":{ "shape":"NonNegativeLong", - "documentation":"

Number of nulls.

" + "documentation":"

The number of null values in the column.

" }, "NumberOfDistinctValues":{ "shape":"NonNegativeLong", - "documentation":"

Number of distinct values.

" + "documentation":"

The number of distinct values in a column.

" } }, - "documentation":"

Defines a date column statistics data.

" + "documentation":"

Defines column statistics supported for timestamp data columns.

" }, "DecimalColumnStatisticsData":{ "type":"structure", @@ -4782,22 +5418,22 @@ "members":{ "MinimumValue":{ "shape":"DecimalNumber", - "documentation":"

Minimum value of the column.

" + "documentation":"

The lowest value in the column.

" }, "MaximumValue":{ "shape":"DecimalNumber", - "documentation":"

Maximum value of the column.

" + "documentation":"

The highest value in the column.

" }, "NumberOfNulls":{ "shape":"NonNegativeLong", - "documentation":"

Number of nulls.

" + "documentation":"

The number of null values in the column.

" }, "NumberOfDistinctValues":{ "shape":"NonNegativeLong", - "documentation":"

Number of distinct values.

" + "documentation":"

The number of distinct values in a column.

" } }, - "documentation":"

Defines a decimal column statistics data.

" + "documentation":"

Defines column statistics supported for fixed-point number data columns.

" }, "DecimalNumber":{ "type":"structure", @@ -5019,6 +5655,37 @@ } } }, + "DeletePartitionIndexRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "IndexName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The catalog ID where the table resides.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

Specifies the name of a database from which you want to delete a partition index.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

Specifies the name of a table from which you want to delete a partition index.

" + }, + "IndexName":{ + "shape":"NameString", + "documentation":"

The name of the partition index to be deleted.

" + } + } + }, + "DeletePartitionIndexResponse":{ + "type":"structure", + "members":{ + } + }, "DeletePartitionRequest":{ "type":"structure", "required":[ @@ -5050,6 +5717,33 @@ "members":{ } }, + "DeleteRegistryInput":{ + "type":"structure", + "required":["RegistryId"], + "members":{ + "RegistryId":{ + "shape":"RegistryId", + "documentation":"

This is a wrapper structure that may contain the registry name and Amazon Resource Name (ARN).

" + } + } + }, + "DeleteRegistryResponse":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry being deleted.

" + }, + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the registry being deleted.

" + }, + "Status":{ + "shape":"RegistryStatus", + "documentation":"

The status of the registry. A successful operation will return the Deleting status.

" + } + } + }, "DeleteResourcePolicyRequest":{ "type":"structure", "members":{ @@ -5068,6 +5762,59 @@ "members":{ } }, + "DeleteSchemaInput":{ + "type":"structure", + "required":["SchemaId"], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure that may contain the schema name and Amazon Resource Name (ARN).

" + } + } + }, + "DeleteSchemaResponse":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema being deleted.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the schema being deleted.

" + }, + "Status":{ + "shape":"SchemaStatus", + "documentation":"

The status of the schema.

" + } + } + }, + "DeleteSchemaVersionsInput":{ + "type":"structure", + "required":[ + "SchemaId", + "Versions" + ], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure that may contain the schema name and Amazon Resource Name (ARN).

" + }, + "Versions":{ + "shape":"VersionsString", + "documentation":"

A version range may be supplied, which may be in one of the following formats:

  • a single version number, 5

  • a range, 5-8: deletes versions 5, 6, 7, and 8

" + } + } + }, + "DeleteSchemaVersionsResponse":{ + "type":"structure", + "members":{ + "SchemaVersionErrors":{ + "shape":"SchemaVersionErrorList", + "documentation":"

A list of SchemaVersionErrorItem objects, each containing an error and schema version.

" + } + } + }, "DeleteSecurityConfigurationRequest":{ "type":"structure", "required":["Name"], @@ -5360,22 +6107,22 @@ "members":{ "MinimumValue":{ "shape":"Double", - "documentation":"

Minimum value of the column.

" + "documentation":"

The lowest value in the column.

" }, "MaximumValue":{ "shape":"Double", - "documentation":"

Maximum value of the column.

" + "documentation":"

The highest value in the column.

" }, "NumberOfNulls":{ "shape":"NonNegativeLong", - "documentation":"

Number of nulls.

" + "documentation":"

The number of null values in the column.

" }, "NumberOfDistinctValues":{ "shape":"NonNegativeLong", - "documentation":"

Number of distinct values.

" + "documentation":"

The number of distinct values in a column.

" } }, - "documentation":"

Defines a double column statistics data.

" + "documentation":"

Defines column statistics supported for floating-point number data columns.

" }, "DynamoDBTarget":{ "type":"structure", @@ -5473,6 +6220,7 @@ "key":{"shape":"NameString"}, "value":{"shape":"ErrorDetail"} }, + "ErrorCodeString":{"type":"string"}, "ErrorDetail":{ "type":"structure", "members":{ @@ -5487,6 +6235,21 @@ }, "documentation":"

Contains details about an error.

" }, + "ErrorDetails":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCodeString", + "documentation":"

The error code for an error.

" + }, + "ErrorMessage":{ + "shape":"ErrorMessageString", + "documentation":"

The error message for an error.

" + } + }, + "documentation":"

An object containing error details.

" + }, + "ErrorMessageString":{"type":"string"}, "ErrorString":{"type":"string"}, "EvaluationMetrics":{ "type":"structure", @@ -6385,6 +7148,10 @@ "MaxRetries":{ "shape":"NullableInteger", "documentation":"

The maximum number of times to retry a task for this transform after a task run fails.

" + }, + "TransformEncryption":{ + "shape":"TransformEncryption", + "documentation":"

The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.

" } } }, @@ -6603,7 +7370,7 @@ }, "AdditionalPlanOptionsMap":{ "shape":"AdditionalPlanOptionsMap", - "documentation":"

A map to hold additional optional key-value parameters.

" + "documentation":"

A map to hold additional optional key-value parameters.

Currently, these key-value pairs are supported:

  • inferSchema  —  Specifies whether to set inferSchema to true or false for the default script generated by an AWS Glue job. For example, to set inferSchema to true, pass the following key value pair:

    --additional-plan-options-map '{\"inferSchema\":\"true\"}'

" } } }, @@ -6620,6 +7387,45 @@ } } }, + "GetRegistryInput":{ + "type":"structure", + "required":["RegistryId"], + "members":{ + "RegistryId":{ + "shape":"RegistryId", + "documentation":"

This is a wrapper structure that may contain the registry name and Amazon Resource Name (ARN).

" + } + } + }, + "GetRegistryResponse":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry.

" + }, + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the registry.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the registry.

" + }, + "Status":{ + "shape":"RegistryStatus", + "documentation":"

The status of the registry.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The date and time the registry was created.

" + }, + "UpdatedTime":{ + "shape":"UpdatedTimestamp", + "documentation":"

The date and time the registry was updated.

" + } + } + }, "GetResourcePoliciesRequest":{ "type":"structure", "members":{ @@ -6680,6 +7486,201 @@ } } }, + "GetSchemaByDefinitionInput":{ + "type":"structure", + "required":[ + "SchemaId", + "SchemaDefinition" + ], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure to contain schema identity fields. The structure contains:

  • SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn or SchemaName has to be provided.

  • SchemaId$SchemaName: The name of the schema. One of SchemaArn or SchemaName has to be provided.

" + }, + "SchemaDefinition":{ + "shape":"SchemaDefinitionString", + "documentation":"

The definition of the schema for which schema details are required.

" + } + } + }, + "GetSchemaByDefinitionResponse":{ + "type":"structure", + "members":{ + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The schema ID of the schema version.

" + }, + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema.

" + }, + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

The data format of the schema definition. Currently only AVRO is supported.

" + }, + "Status":{ + "shape":"SchemaVersionStatus", + "documentation":"

The status of the schema version.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The date and time the schema was created.

" + } + } + }, + "GetSchemaInput":{ + "type":"structure", + "required":["SchemaId"], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure to contain schema identity fields. The structure contains:

  • SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn or SchemaName and RegistryName has to be provided.

  • SchemaId$SchemaName: The name of the schema. Either SchemaArn or SchemaName and RegistryName has to be provided.

" + } + } + }, + "GetSchemaResponse":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry.

" + }, + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the registry.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the schema.

" + }, + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the schema, if specified when created.

" + }, + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

The data format of the schema definition. Currently only AVRO is supported.

" + }, + "Compatibility":{ + "shape":"Compatibility", + "documentation":"

The compatibility mode of the schema.

" + }, + "SchemaCheckpoint":{ + "shape":"SchemaCheckpointNumber", + "documentation":"

The version number of the checkpoint (the last time the compatibility mode was changed).

" + }, + "LatestSchemaVersion":{ + "shape":"VersionLongNumber", + "documentation":"

The latest version of the schema associated with the returned schema definition.

" + }, + "NextSchemaVersion":{ + "shape":"VersionLongNumber", + "documentation":"

The next version of the schema associated with the returned schema definition.

" + }, + "SchemaStatus":{ + "shape":"SchemaStatus", + "documentation":"

The status of the schema.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The date and time the schema was created.

" + }, + "UpdatedTime":{ + "shape":"UpdatedTimestamp", + "documentation":"

The date and time the schema was updated.

" + } + } + }, + "GetSchemaVersionInput":{ + "type":"structure", + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure to contain schema identity fields. The structure contains:

  • SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn or SchemaName and RegistryName has to be provided.

  • SchemaId$SchemaName: The name of the schema. Either SchemaArn or SchemaName and RegistryName has to be provided.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The SchemaVersionId of the schema version. This field is required for fetching by schema ID. Either this or the SchemaId wrapper has to be provided.

" + }, + "SchemaVersionNumber":{ + "shape":"SchemaVersionNumber", + "documentation":"

The version number of the schema.

" + } + } + }, + "GetSchemaVersionResponse":{ + "type":"structure", + "members":{ + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The SchemaVersionId of the schema version.

" + }, + "SchemaDefinition":{ + "shape":"SchemaDefinitionString", + "documentation":"

The schema definition for the schema ID.

" + }, + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

The data format of the schema definition. Currently only AVRO is supported.

" + }, + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema.

" + }, + "VersionNumber":{ + "shape":"VersionLongNumber", + "documentation":"

The version number of the schema.

" + }, + "Status":{ + "shape":"SchemaVersionStatus", + "documentation":"

The status of the schema version.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The date and time the schema version was created.

" + } + } + }, + "GetSchemaVersionsDiffInput":{ + "type":"structure", + "required":[ + "SchemaId", + "FirstSchemaVersionNumber", + "SecondSchemaVersionNumber", + "SchemaDiffType" + ], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure to contain schema identity fields. The structure contains:

  • SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn or SchemaName has to be provided.

  • SchemaId$SchemaName: The name of the schema. One of SchemaArn or SchemaName has to be provided.

" + }, + "FirstSchemaVersionNumber":{ + "shape":"SchemaVersionNumber", + "documentation":"

The first of the two schema versions to be compared.

" + }, + "SecondSchemaVersionNumber":{ + "shape":"SchemaVersionNumber", + "documentation":"

The second of the two schema versions to be compared.

" + }, + "SchemaDiffType":{ + "shape":"SchemaDiffType", + "documentation":"

Refers to SYNTAX_DIFF, which is the currently supported diff type.

" + } + } + }, + "GetSchemaVersionsDiffResponse":{ + "type":"structure", + "members":{ + "Diff":{ + "shape":"SchemaDefinitionDiff", + "documentation":"

The difference between schemas as a string in JsonPatch format.

" + } + } + }, "GetSecurityConfigurationRequest":{ "type":"structure", "required":["Name"], @@ -7339,6 +8340,7 @@ "documentation":"

The input provided was not valid.

", "exception":true }, + "IsVersionValid":{"type":"boolean"}, "JdbcTarget":{ "type":"structure", "members":{ @@ -7855,6 +8857,17 @@ "FAILED" ] }, + "LatestSchemaVersionBoolean":{"type":"boolean"}, + "LineageConfiguration":{ + "type":"structure", + "members":{ + "CrawlerLineageSettings":{ + "shape":"CrawlerLineageSettings", + "documentation":"

Specifies whether data lineage is enabled for the crawler. Valid values are:

  • ENABLE: enables data lineage for the crawler

  • DISABLE: disables data lineage for the crawler

" + } + }, + "documentation":"

Specifies data lineage configuration settings for the crawler.

" + }, "ListCrawlersRequest":{ "type":"structure", "members":{ @@ -7984,6 +8997,93 @@ } } }, + "ListRegistriesInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResultsNumber", + "documentation":"

Maximum number of results required per page. If the value is not supplied, this will be defaulted to 25 per page.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token, if this is a continuation call.

" + } + } + }, + "ListRegistriesResponse":{ + "type":"structure", + "members":{ + "Registries":{ + "shape":"RegistryListDefinition", + "documentation":"

An array of RegistryListItem objects containing minimal details of each registry.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token for paginating the returned list of registries, returned if the current segment of the list is not the last.

" + } + } + }, + "ListSchemaVersionsInput":{ + "type":"structure", + "required":["SchemaId"], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure to contain schema identity fields. The structure contains:

  • SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn or SchemaName and RegistryName has to be provided.

  • SchemaId$SchemaName: The name of the schema. Either SchemaArn or SchemaName and RegistryName has to be provided.

" + }, + "MaxResults":{ + "shape":"MaxResultsNumber", + "documentation":"

Maximum number of results required per page. If the value is not supplied, this will be defaulted to 25 per page.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token, if this is a continuation call.

" + } + } + }, + "ListSchemaVersionsResponse":{ + "type":"structure", + "members":{ + "Schemas":{ + "shape":"SchemaVersionList", + "documentation":"

An array of SchemaVersionListItem objects containing details of each schema version.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token for paginating the returned list of schema versions, returned if the current segment of the list is not the last.

" + } + } + }, + "ListSchemasInput":{ + "type":"structure", + "members":{ + "RegistryId":{ + "shape":"RegistryId", + "documentation":"

A wrapper structure that may contain the registry name and Amazon Resource Name (ARN).

" + }, + "MaxResults":{ + "shape":"MaxResultsNumber", + "documentation":"

Maximum number of results required per page. If the value is not supplied, this will be defaulted to 25 per page.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token, if this is a continuation call.

" + } + } + }, + "ListSchemasResponse":{ + "type":"structure", + "members":{ + "Schemas":{ + "shape":"SchemaListDefinition", + "documentation":"

An array of SchemaListItem objects containing details of each schema.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token for paginating the returned list of schemas, returned if the current segment of the list is not the last.

" + } + } + }, "ListTriggersRequest":{ "type":"structure", "members":{ @@ -8105,22 +9205,22 @@ "members":{ "MinimumValue":{ "shape":"Long", - "documentation":"

Minimum value of the column.

" + "documentation":"

The lowest value in the column.

" }, "MaximumValue":{ "shape":"Long", - "documentation":"

Maximum value of the column.

" + "documentation":"

The highest value in the column.

" }, "NumberOfNulls":{ "shape":"NonNegativeLong", - "documentation":"

Number of nulls.

" + "documentation":"

The number of null values in the column.

" }, "NumberOfDistinctValues":{ "shape":"NonNegativeLong", - "documentation":"

Number of distinct values.

" + "documentation":"

The number of distinct values in a column.

" } }, - "documentation":"

Defines a long column statistics data.

" + "documentation":"

Defines column statistics supported for integer data columns.

" }, "MLTransform":{ "type":"structure", @@ -8196,6 +9296,10 @@ "MaxRetries":{ "shape":"NullableInteger", "documentation":"

The maximum number of times to retry after an MLTaskRun of the machine learning transform fails.

" + }, + "TransformEncryption":{ + "shape":"TransformEncryption", + "documentation":"

The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.

" } }, "documentation":"

A structure for a machine learning transform.

" @@ -8211,6 +9315,28 @@ "documentation":"

The machine learning transform is not ready to run.

", "exception":true }, + "MLUserDataEncryption":{ + "type":"structure", + "required":["MlUserDataEncryptionMode"], + "members":{ + "MlUserDataEncryptionMode":{ + "shape":"MLUserDataEncryptionModeString", + "documentation":"

The encryption mode applied to user data. Valid values are:

  • DISABLED: encryption is disabled.

  • SSE-KMS: use of server-side encryption with AWS Key Management Service (SSE-KMS) for user data stored in Amazon S3.

" + }, + "KmsKeyId":{ + "shape":"NameString", + "documentation":"

The ID for the customer-provided KMS key.

" + } + }, + "documentation":"

The encryption-at-rest settings of the transform that apply to accessing user data.

" + }, + "MLUserDataEncryptionModeString":{ + "type":"string", + "enum":[ + "DISABLED", + "SSE-KMS" + ] + }, "MapValue":{ "type":"map", "key":{"shape":"GenericString"}, @@ -8259,6 +9385,12 @@ "min":0 }, "MaxConcurrentRuns":{"type":"integer"}, + "MaxResultsNumber":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "MaxRetries":{"type":"integer"}, "MessagePrefix":{ "type":"string", @@ -8267,6 +9399,55 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, "MessageString":{"type":"string"}, + "MetadataInfo":{ + "type":"structure", + "members":{ + "MetadataValue":{ + "shape":"MetadataValueString", + "documentation":"

The metadata key’s corresponding value.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The time at which the entry was created.

" + } + }, + "documentation":"

A structure containing metadata information for a schema version.

" + }, + "MetadataInfoMap":{ + "type":"map", + "key":{"shape":"MetadataKeyString"}, + "value":{"shape":"MetadataInfo"} + }, + "MetadataKeyString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9+-=._./@]+" + }, + "MetadataKeyValuePair":{ + "type":"structure", + "members":{ + "MetadataKey":{ + "shape":"MetadataKeyString", + "documentation":"

A metadata key.

" + }, + "MetadataValue":{ + "shape":"MetadataValueString", + "documentation":"

A metadata key’s corresponding value.

" + } + }, + "documentation":"

A structure containing a key value pair for metadata.

" + }, + "MetadataList":{ + "type":"list", + "member":{"shape":"MetadataKeyValuePair"} + }, + "MetadataValueString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9+-=._./@]+" + }, "MillisecondsCount":{"type":"long"}, "MongoDBTarget":{ "type":"structure", @@ -8545,7 +9726,11 @@ }, "IndexStatus":{ "shape":"PartitionIndexStatus", - "documentation":"

The status of the partition index.

" + "documentation":"

The status of the partition index.

The possible statuses are:

  • CREATING: The index is being created. When an index is in a CREATING state, the index or its table cannot be deleted.

  • ACTIVE: The index creation has succeeded.

  • FAILED: The index creation has failed.

  • DELETING: The index is being deleted from the list of indexes.

" + }, + "BackfillErrors":{ + "shape":"BackfillErrors", + "documentation":"

A list of errors that can occur when registering partition indexes for an existing table.

" } }, "documentation":"

A descriptor for a partition index in a table.

" @@ -8561,7 +9746,12 @@ }, "PartitionIndexStatus":{ "type":"string", - "enum":["ACTIVE"] + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED" + ] }, "PartitionInput":{ "type":"structure", @@ -8797,6 +9987,65 @@ } } }, + "PutSchemaVersionMetadataInput":{ + "type":"structure", + "required":["MetadataKeyValue"], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

The unique ID for the schema.

" + }, + "SchemaVersionNumber":{ + "shape":"SchemaVersionNumber", + "documentation":"

The version number of the schema.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique version ID of the schema version.

" + }, + "MetadataKeyValue":{ + "shape":"MetadataKeyValuePair", + "documentation":"

The metadata key's corresponding value.

" + } + } + }, + "PutSchemaVersionMetadataResponse":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) for the schema.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name for the schema.

" + }, + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name for the registry.

" + }, + "LatestVersion":{ + "shape":"LatestSchemaVersionBoolean", + "documentation":"

Indicates whether the specified version is the latest version of the schema.

" + }, + "VersionNumber":{ + "shape":"VersionLongNumber", + "documentation":"

The version number of the schema.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique version ID of the schema version.

" + }, + "MetadataKey":{ + "shape":"MetadataKeyString", + "documentation":"

The metadata key.

" + }, + "MetadataValue":{ + "shape":"MetadataValueString", + "documentation":"

The value of the metadata key.

" + } + } + }, "PutWorkflowRunPropertiesRequest":{ "type":"structure", "required":[ @@ -8829,10 +10078,226 @@ "type":"string", "pattern":"^[2-3]$" }, + "QuerySchemaVersionMetadataInput":{ + "type":"structure", + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

A wrapper structure that may contain the schema name and Amazon Resource Name (ARN).

" + }, + "SchemaVersionNumber":{ + "shape":"SchemaVersionNumber", + "documentation":"

The version number of the schema.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique version ID of the schema version.

" + }, + "MetadataList":{ + "shape":"MetadataList", + "documentation":"

Search key-value pairs for metadata. If none are provided, all of the metadata information is fetched.

" + }, + "MaxResults":{ + "shape":"QuerySchemaVersionMetadataMaxResults", + "documentation":"

Maximum number of results required per page. If the value is not supplied, this will be defaulted to 25 per page.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token, if this is a continuation call.

" + } + } + }, + "QuerySchemaVersionMetadataMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "QuerySchemaVersionMetadataResponse":{ + "type":"structure", + "members":{ + "MetadataInfoMap":{ + "shape":"MetadataInfoMap", + "documentation":"

A map of a metadata key and associated values.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique version ID of the schema version.

" + }, + "NextToken":{ + "shape":"SchemaRegistryTokenString", + "documentation":"

A continuation token for paginating the returned metadata, returned if the current segment of the list is not the last.

" + } + } + }, "RecordsCount":{ "type":"long", "box":true }, + "RecrawlBehavior":{ + "type":"string", + "enum":[ + "CRAWL_EVERYTHING", + "CRAWL_NEW_FOLDERS_ONLY" + ] + }, + "RecrawlPolicy":{ + "type":"structure", + "members":{ + "RecrawlBehavior":{ + "shape":"RecrawlBehavior", + "documentation":"

Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.

A value of CRAWL_EVERYTHING specifies crawling the entire dataset again.

A value of CRAWL_NEW_FOLDERS_ONLY specifies crawling only folders that were added since the last crawler run.

" + } + }, + "documentation":"

When crawling an Amazon S3 data source after the first crawl is complete, specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. For more information, see Incremental Crawls in AWS Glue in the developer guide.
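
As a sketch of how this policy is wired up from the Java v2 client (the crawler name is a placeholder), switching an existing S3 crawler to incremental crawls looks roughly like this:

import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.RecrawlBehavior;
import software.amazon.awssdk.services.glue.model.RecrawlPolicy;
import software.amazon.awssdk.services.glue.model.UpdateCrawlerRequest;

public class EnableIncrementalCrawls {
    public static void main(String[] args) {
        try (GlueClient glue = GlueClient.create()) {
            // Only folders added since the last crawler run will be crawled.
            glue.updateCrawler(UpdateCrawlerRequest.builder()
                    .name("my-s3-crawler")                        // placeholder crawler name
                    .recrawlPolicy(RecrawlPolicy.builder()
                            .recrawlBehavior(RecrawlBehavior.CRAWL_NEW_FOLDERS_ONLY)
                            .build())
                    .build());
        }
    }
}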

" + }, + "RegisterSchemaVersionInput":{ + "type":"structure", + "required":[ + "SchemaId", + "SchemaDefinition" + ], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure to contain schema identity fields. The structure contains:

  • SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn, or SchemaName and RegistryName, has to be provided.

  • SchemaId$SchemaName: The name of the schema. Either SchemaArn, or SchemaName and RegistryName, has to be provided.

" + }, + "SchemaDefinition":{ + "shape":"SchemaDefinitionString", + "documentation":"

The schema definition using the DataFormat setting for the SchemaName.
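
A hedged sketch of registering a new version with the Java v2 client: the registry and schema names are placeholders, and the definition assumes the schema's DataFormat is AVRO.

import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.RegisterSchemaVersionRequest;
import software.amazon.awssdk.services.glue.model.RegisterSchemaVersionResponse;
import software.amazon.awssdk.services.glue.model.SchemaId;

public class RegisterNewSchemaVersion {
    public static void main(String[] args) {
        try (GlueClient glue = GlueClient.create()) {
            RegisterSchemaVersionResponse response = glue.registerSchemaVersion(
                    RegisterSchemaVersionRequest.builder()
                            .schemaId(SchemaId.builder()          // SchemaName + RegistryName, or SchemaArn
                                    .registryName("my-registry")
                                    .schemaName("orders")
                                    .build())
                            .schemaDefinition(
                                    "{\"type\":\"record\",\"name\":\"Order\","
                                    + "\"fields\":[{\"name\":\"id\",\"type\":\"string\"}]}")
                            .build());
            System.out.println(response.schemaVersionId() + " v" + response.versionNumber()
                    + " (" + response.status() + ")");
        }
    }
}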

" + } + } + }, + "RegisterSchemaVersionResponse":{ + "type":"structure", + "members":{ + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique ID that represents the version of this schema.

" + }, + "VersionNumber":{ + "shape":"VersionLongNumber", + "documentation":"

The version of this schema (for sync flow only, in case this is the first version).

" + }, + "Status":{ + "shape":"SchemaVersionStatus", + "documentation":"

The status of the schema version.

" + } + } + }, + "RegistryId":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

Name of the registry. Used only for lookup. One of RegistryArn or RegistryName has to be provided.

" + }, + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The ARN of the registry to be updated. One of RegistryArn or RegistryName has to be provided.

" + } + }, + "documentation":"

A wrapper structure that may contain the registry name and Amazon Resource Name (ARN).

" + }, + "RegistryListDefinition":{ + "type":"list", + "member":{"shape":"RegistryListItem"} + }, + "RegistryListItem":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry.

" + }, + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the registry.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the registry.

" + }, + "Status":{ + "shape":"RegistryStatus", + "documentation":"

The status of the registry.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The date the registry was created.

" + }, + "UpdatedTime":{ + "shape":"UpdatedTimestamp", + "documentation":"

The date the registry was updated.

" + } + }, + "documentation":"

A structure containing the details for a registry.

" + }, + "RegistryStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "DELETING" + ] + }, + "RemoveSchemaVersionMetadataInput":{ + "type":"structure", + "required":["MetadataKeyValue"], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

A wrapper structure that may contain the schema name and Amazon Resource Name (ARN).

" + }, + "SchemaVersionNumber":{ + "shape":"SchemaVersionNumber", + "documentation":"

The version number of the schema.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique version ID of the schema version.

" + }, + "MetadataKeyValue":{ + "shape":"MetadataKeyValuePair", + "documentation":"

The key-value pair of the metadata to remove.

" + } + } + }, + "RemoveSchemaVersionMetadataResponse":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the schema.

" + }, + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry.

" + }, + "LatestVersion":{ + "shape":"LatestSchemaVersionBoolean", + "documentation":"

Whether the affected version is the latest version of the schema.

" + }, + "VersionNumber":{ + "shape":"VersionLongNumber", + "documentation":"

The version number of the schema.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The version ID for the schema version.

" + }, + "MetadataKey":{ + "shape":"MetadataKeyString", + "documentation":"

The metadata key.

" + }, + "MetadataValue":{ + "shape":"MetadataValueString", + "documentation":"

The value of the metadata key.

" + } + } + }, "ReplaceBoolean":{"type":"boolean"}, "ResetJobBookmarkRequest":{ "type":"structure", @@ -9064,6 +10529,11 @@ }, "documentation":"

A policy that specifies update and deletion behaviors for the crawler.

" }, + "SchemaCheckpointNumber":{ + "type":"long", + "max":100000, + "min":1 + }, "SchemaColumn":{ "type":"structure", "members":{ @@ -9078,7 +10548,178 @@ }, "documentation":"

A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.

" }, + "SchemaDefinitionDiff":{ + "type":"string", + "max":340000, + "min":1, + "pattern":".*\\S.*" + }, + "SchemaDefinitionString":{ + "type":"string", + "max":170000, + "min":1, + "pattern":".*\\S.*" + }, + "SchemaDiffType":{ + "type":"string", + "enum":["SYNTAX_DIFF"] + }, + "SchemaId":{ + "type":"structure", + "members":{ + "SchemaArn":{"shape":"GlueResourceArn"}, + "SchemaName":{"shape":"SchemaRegistryNameString"}, + "RegistryName":{"shape":"SchemaRegistryNameString"} + } + }, + "SchemaListDefinition":{ + "type":"list", + "member":{"shape":"SchemaListItem"} + }, + "SchemaListItem":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry where the schema resides.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the schema.

" + }, + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) for the schema.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description for the schema.

" + }, + "SchemaStatus":{ + "shape":"SchemaStatus", + "documentation":"

The status of the schema.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The date and time that a schema was created.

" + }, + "UpdatedTime":{ + "shape":"UpdatedTimestamp", + "documentation":"

The date and time that a schema was updated.

" + } + }, + "documentation":"

An object that contains minimal details for a schema.

" + }, "SchemaPathString":{"type":"string"}, + "SchemaReference":{ + "type":"structure", + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

A structure that contains schema identity fields. Either this or the SchemaVersionId has to be provided.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique ID assigned to a version of the schema. Either this or the SchemaId has to be provided.

" + }, + "SchemaVersionNumber":{ + "shape":"VersionLongNumber", + "documentation":"

The version number of the schema.

", + "box":true + } + }, + "documentation":"

An object that references a schema stored in the AWS Glue Schema Registry.

" + }, + "SchemaRegistryNameString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9-_$#]+" + }, + "SchemaRegistryTokenString":{"type":"string"}, + "SchemaStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "PENDING", + "DELETING" + ] + }, + "SchemaValidationError":{ + "type":"string", + "max":5000, + "min":1 + }, + "SchemaVersionErrorItem":{ + "type":"structure", + "members":{ + "VersionNumber":{ + "shape":"VersionLongNumber", + "documentation":"

The version number of the schema.

" + }, + "ErrorDetails":{ + "shape":"ErrorDetails", + "documentation":"

The details of the error for the schema version.

" + } + }, + "documentation":"

An object that contains the error details for an operation on a schema version.

" + }, + "SchemaVersionErrorList":{ + "type":"list", + "member":{"shape":"SchemaVersionErrorItem"} + }, + "SchemaVersionIdString":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "SchemaVersionList":{ + "type":"list", + "member":{"shape":"SchemaVersionListItem"} + }, + "SchemaVersionListItem":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema.

" + }, + "SchemaVersionId":{ + "shape":"SchemaVersionIdString", + "documentation":"

The unique identifier of the schema version.

" + }, + "VersionNumber":{ + "shape":"VersionLongNumber", + "documentation":"

The version number of the schema.

" + }, + "Status":{ + "shape":"SchemaVersionStatus", + "documentation":"

The status of the schema version.

" + }, + "CreatedTime":{ + "shape":"CreatedTimestamp", + "documentation":"

The date and time the schema version was created.

" + } + }, + "documentation":"

An object containing the details about a schema version.

" + }, + "SchemaVersionNumber":{ + "type":"structure", + "members":{ + "LatestVersion":{"shape":"LatestSchemaVersionBoolean"}, + "VersionNumber":{"shape":"VersionLongNumber"} + } + }, + "SchemaVersionStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "PENDING", + "FAILURE", + "DELETING" + ] + }, "ScriptLocationString":{"type":"string"}, "SearchPropertyPredicates":{ "type":"list", @@ -9593,6 +11234,10 @@ "StoredAsSubDirectories":{ "shape":"Boolean", "documentation":"

True if the table data is stored in subdirectories, or False if not.

" + }, + "SchemaReference":{ + "shape":"SchemaReference", + "documentation":"

An object that references a schema stored in the AWS Glue Schema Registry.

When creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference.
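
A sketch (placeholder database, table, bucket, and registry names; the CreateTable/TableInput wiring follows the standard Glue API) of creating a table whose columns come from a registered schema instead of an explicit column list:

import java.util.List;
import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.CreateTableRequest;
import software.amazon.awssdk.services.glue.model.SchemaId;
import software.amazon.awssdk.services.glue.model.SchemaReference;
import software.amazon.awssdk.services.glue.model.StorageDescriptor;
import software.amazon.awssdk.services.glue.model.TableInput;

public class CreateTableFromSchemaRegistry {
    public static void main(String[] args) {
        try (GlueClient glue = GlueClient.create()) {
            glue.createTable(CreateTableRequest.builder()
                    .databaseName("sales")
                    .tableInput(TableInput.builder()
                            .name("orders")
                            .storageDescriptor(StorageDescriptor.builder()
                                    .columns(List.of())            // empty column list; columns come from the schema
                                    .location("s3://my-bucket/orders/")
                                    .schemaReference(SchemaReference.builder()
                                            .schemaId(SchemaId.builder()
                                                    .registryName("my-registry")
                                                    .schemaName("orders")
                                                    .build())
                                            .schemaVersionNumber(1L)
                                            .build())
                                    .build())
                            .build())
                    .build());
        }
    }
}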

" } }, "documentation":"

Describes the physical storage of table data.

" @@ -9608,22 +11253,22 @@ "members":{ "MaximumLength":{ "shape":"NonNegativeLong", - "documentation":"

Maximum value of the column.

" + "documentation":"

The size of the longest string in the column.

" }, "AverageLength":{ "shape":"NonNegativeDouble", - "documentation":"

Average value of the column.

" + "documentation":"

The average string length in the column.

" }, "NumberOfNulls":{ "shape":"NonNegativeLong", - "documentation":"

Number of nulls.

" + "documentation":"

The number of null values in the column.

" }, "NumberOfDistinctValues":{ "shape":"NonNegativeLong", - "documentation":"

Number of distinct values.

" + "documentation":"

The number of distinct values in a column.

" } }, - "documentation":"

Defines a string column statistics data.

" + "documentation":"

Defines column statistics supported for character sequence data values.

" }, "StringList":{ "type":"list", @@ -10061,6 +11706,20 @@ "max":10, "min":1 }, + "TransformEncryption":{ + "type":"structure", + "members":{ + "MlUserDataEncryption":{ + "shape":"MLUserDataEncryption", + "documentation":"

An MLUserDataEncryption object containing the encryption mode and customer-provided KMS key ID.

" + }, + "TaskRunSecurityConfigurationName":{ + "shape":"NameString", + "documentation":"

The name of the security configuration.

" + } + }, + "documentation":"

The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.

Additionally, imported labels and trained transforms can now be encrypted using a customer provided KMS key.

" + }, "TransformFilterCriteria":{ "type":"structure", "members":{ @@ -10117,7 +11776,7 @@ "members":{ "TransformType":{ "shape":"TransformType", - "documentation":"

The type of machine learning transform.

For information about the types of machine learning transforms, see Creating Machine Learning Transforms.

" + "documentation":"

The type of machine learning transform.

For information about the types of machine learning transforms, see Creating Machine Learning Transforms.

" }, "FindMatchesParameters":{ "shape":"FindMatchesParameters", @@ -10492,6 +12151,14 @@ "shape":"SchemaChangePolicy", "documentation":"

The policy for the crawler's update and deletion behavior.

" }, + "RecrawlPolicy":{ + "shape":"RecrawlPolicy", + "documentation":"

A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.

" + }, + "LineageConfiguration":{ + "shape":"LineageConfiguration", + "documentation":"

Specifies data lineage configuration settings for the crawler.

" + }, "Configuration":{ "shape":"CrawlerConfiguration", "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" @@ -10789,6 +12456,75 @@ "members":{ } }, + "UpdateRegistryInput":{ + "type":"structure", + "required":[ + "RegistryId", + "Description" + ], + "members":{ + "RegistryId":{ + "shape":"RegistryId", + "documentation":"

This is a wrapper structure that may contain the registry name and Amazon Resource Name (ARN).

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the registry. If description is not provided, this field will not be updated.

" + } + } + }, + "UpdateRegistryResponse":{ + "type":"structure", + "members":{ + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the updated registry.

" + }, + "RegistryArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated registry.

" + } + } + }, + "UpdateSchemaInput":{ + "type":"structure", + "required":["SchemaId"], + "members":{ + "SchemaId":{ + "shape":"SchemaId", + "documentation":"

This is a wrapper structure to contain schema identity fields. The structure contains:

  • SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn or SchemaName has to be provided.

  • SchemaId$SchemaName: The name of the schema. One of SchemaArn or SchemaName has to be provided.

" + }, + "SchemaVersionNumber":{ + "shape":"SchemaVersionNumber", + "documentation":"

The version number required for checkpointing. One of VersionNumber or Compatibility has to be provided.

" + }, + "Compatibility":{ + "shape":"Compatibility", + "documentation":"

The new compatibility setting for the schema.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

The new description for the schema.

" + } + } + }, + "UpdateSchemaResponse":{ + "type":"structure", + "members":{ + "SchemaArn":{ + "shape":"GlueResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the schema.

" + }, + "SchemaName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the schema.

" + }, + "RegistryName":{ + "shape":"SchemaRegistryNameString", + "documentation":"

The name of the registry that contains the schema.

" + } + } + }, "UpdateTableRequest":{ "type":"structure", "required":[ @@ -10926,6 +12662,7 @@ }, "documentation":"

Specifies an XML classifier to be updated.

" }, + "UpdatedTimestamp":{"type":"string"}, "UriString":{"type":"string"}, "UserDefinedFunction":{ "type":"structure", @@ -11015,6 +12752,11 @@ "member":{"shape":"ValueString"} }, "VersionId":{"type":"long"}, + "VersionLongNumber":{ + "type":"long", + "max":100000, + "min":1 + }, "VersionMismatchException":{ "type":"structure", "members":{ @@ -11032,6 +12774,12 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "VersionsString":{ + "type":"string", + "max":100000, + "min":1, + "pattern":"[1-9][0-9]*|[1-9][0-9]*-[1-9][0-9]*" + }, "ViewTextString":{ "type":"string", "max":409600 diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 3607e93fe5fc..19ba4168935d 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index c0aa71e2b166..237ee6a7c52c 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/groundstation/src/main/resources/codegen-resources/service-2.json b/services/groundstation/src/main/resources/codegen-resources/service-2.json index ecf95b972999..905117189108 100644 --- a/services/groundstation/src/main/resources/codegen-resources/service-2.json +++ b/services/groundstation/src/main/resources/codegen-resources/service-2.json @@ -694,6 +694,7 @@ "enum":[ "AVAILABLE", "AWS_CANCELLED", + "AWS_FAILED", "CANCELLED", "CANCELLING", "COMPLETED", @@ -795,6 +796,10 @@ "type":"structure", "members":{ "destination":{"shape":"Destination"}, + "errorMessage":{ + "shape":"String", + "documentation":"

Error message for a dataflow.

" + }, "source":{"shape":"Source"} }, "documentation":"

Information about a dataflow edge used in a contact.

" diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index f0ec49bb487d..8ace7bfb7570 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 guardduty diff --git a/services/health/pom.xml b/services/health/pom.xml index dac3825aae31..14bd340aa4f7 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index 0e19fcabfc1d..7d4597a7e977 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/honeycode/src/main/resources/codegen-resources/paginators-1.json b/services/honeycode/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..19ba884c5e5c 100644 --- a/services/honeycode/src/main/resources/codegen-resources/paginators-1.json +++ b/services/honeycode/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,27 @@ { "pagination": { + "ListTableColumns": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "tableColumns" + }, + "ListTableRows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "rows" + }, + "ListTables": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "tables" + }, + "QueryTableRows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "rows" + } } } diff --git a/services/honeycode/src/main/resources/codegen-resources/service-2.json b/services/honeycode/src/main/resources/codegen-resources/service-2.json index 4ec967eb53b3..dfca581ebdd0 100644 --- a/services/honeycode/src/main/resources/codegen-resources/service-2.json +++ b/services/honeycode/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,102 @@ "uid":"honeycode-2020-03-01" }, "operations":{ + "BatchCreateTableRows":{ + "name":"BatchCreateTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchcreate" + }, + "input":{"shape":"BatchCreateTableRowsRequest"}, + "output":{"shape":"BatchCreateTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The BatchCreateTableRows API allows you to create one or more rows at the end of a table in a workbook. The API allows you to specify the values to set in some or all of the columns in the new rows.

If a column is not explicitly set in a specific row, then the column level formula specified in the table will be applied to the new row. If there is no column level formula but the last row of the table has a formula, then that formula will be copied down to the new row. If there is no column level formula and no formula in the last row of the table, then that column will be left blank for the new rows.
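
For orientation, a minimal sketch with the generated Honeycode Java v2 client: the workbook, table, and column ids are placeholders, and the request token is reused on retries to deduplicate the call.

import java.util.Map;
import java.util.UUID;
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.BatchCreateTableRowsRequest;
import software.amazon.awssdk.services.honeycode.model.BatchCreateTableRowsResponse;
import software.amazon.awssdk.services.honeycode.model.CellInput;
import software.amazon.awssdk.services.honeycode.model.CreateRowData;

public class AppendRows {
    public static void main(String[] args) {
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            BatchCreateTableRowsResponse response = honeycode.batchCreateTableRows(
                    BatchCreateTableRowsRequest.builder()
                            .workbookId("<workbook-id>")
                            .tableId("<table-id>")
                            .rowsToCreate(CreateRowData.builder()
                                    .batchItemId("new-row-1")       // caller-chosen id, echoed back in createdRows
                                    .cellsToCreate(Map.of(
                                            "<name-column-id>", CellInput.builder().fact("John Smith").build(),
                                            "<due-column-id>", CellInput.builder().fact("=TODAY()").build()))
                                    .build())
                            .clientRequestToken(UUID.randomUUID().toString()) // keep and reuse when retrying
                            .build());
            response.createdRows().forEach((itemId, rowId) -> System.out.println(itemId + " -> " + rowId));
            response.failedBatchItems().forEach(item -> System.err.println(item.id() + ": " + item.errorMessage()));
        }
    }
}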

" + }, + "BatchDeleteTableRows":{ + "name":"BatchDeleteTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchdelete" + }, + "input":{"shape":"BatchDeleteTableRowsRequest"}, + "output":{"shape":"BatchDeleteTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

The BatchDeleteTableRows API allows you to delete one or more rows from a table in a workbook. You need to specify the ids of the rows that you want to delete from the table.

" + }, + "BatchUpdateTableRows":{ + "name":"BatchUpdateTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchupdate" + }, + "input":{"shape":"BatchUpdateTableRowsRequest"}, + "output":{"shape":"BatchUpdateTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

The BatchUpdateTableRows API allows you to update one or more rows in a table in a workbook.

You can specify the values to set in some or all of the columns in the table for the specified rows. If a column is not explicitly specified in a particular row, then that column will not be updated for that row. To clear out the data in a specific cell, you need to set the value as an empty string (\"\").

" + }, + "BatchUpsertTableRows":{ + "name":"BatchUpsertTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/batchupsert" + }, + "input":{"shape":"BatchUpsertTableRowsRequest"}, + "output":{"shape":"BatchUpsertTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The BatchUpsertTableRows API allows you to upsert one or more rows in a table. The upsert operation takes a filter expression as input and evaluates it to find matching rows on the destination table. If matching rows are found, it will update the cells in the matching rows to new values specified in the request. If no matching rows are found, a new row is added at the end of the table and the cells in that row are set to the new values specified in the request.

You can specify the values to set in some or all of the columns in the table for the matching or newly appended rows. If a column is not explicitly specified for a particular row, then that column will not be updated for that row. To clear out the data in a specific cell, you need to set the value as an empty string (\"\").
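
A sketch of a single upsert item, assuming placeholder ids, a hypothetical Tasks table and filter formula, and that the generated UpsertRowData exposes its members as filter and cellsToUpdate.

import java.util.Map;
import java.util.UUID;
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.BatchUpsertTableRowsRequest;
import software.amazon.awssdk.services.honeycode.model.BatchUpsertTableRowsResponse;
import software.amazon.awssdk.services.honeycode.model.CellInput;
import software.amazon.awssdk.services.honeycode.model.Filter;
import software.amazon.awssdk.services.honeycode.model.UpsertRowData;

public class UpsertRows {
    public static void main(String[] args) {
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            BatchUpsertTableRowsResponse response = honeycode.batchUpsertTableRows(
                    BatchUpsertTableRowsRequest.builder()
                            .workbookId("<workbook-id>")
                            .tableId("<table-id>")
                            .rowsToUpsert(UpsertRowData.builder()
                                    .batchItemId("upsert-1")
                                    .filter(Filter.builder()                             // assumed member name
                                            .formula("=FindRow(Tasks, \"Tasks[Priority]=1\")") // hypothetical formula
                                            .build())
                                    .cellsToUpdate(Map.of(                               // assumed member name
                                            "<status-column-id>", CellInput.builder().fact("Completed").build()))
                                    .build())
                            .clientRequestToken(UUID.randomUUID().toString())
                            .build());
            // Per batch item: whether rows were updated or a new row was appended, and the affected row ids.
            response.rows().forEach((itemId, result) -> System.out.println(itemId + " -> " + result));
        }
    }
}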

" + }, + "DescribeTableDataImportJob":{ + "name":"DescribeTableDataImportJob", + "http":{ + "method":"GET", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/import/{jobId}" + }, + "input":{"shape":"DescribeTableDataImportJobRequest"}, + "output":{"shape":"DescribeTableDataImportJobResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The DescribeTableDataImportJob API allows you to retrieve the status and details of a table data import job.

" + }, "GetScreenData":{ "name":"GetScreenData", "http":{ @@ -52,6 +148,100 @@ {"shape":"RequestTimeoutException"} ], "documentation":"

The InvokeScreenAutomation API allows invoking an action defined in a screen in a Honeycode app. The API allows setting local variables, which can then be used in the automation being invoked. This allows automating the Honeycode app interactions to write, update or delete data in the workbook.

" + }, + "ListTableColumns":{ + "name":"ListTableColumns", + "http":{ + "method":"GET", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/columns" + }, + "input":{"shape":"ListTableColumnsRequest"}, + "output":{"shape":"ListTableColumnsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The ListTableColumns API allows you to retrieve a list of all the columns in a table in a workbook.

" + }, + "ListTableRows":{ + "name":"ListTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/list" + }, + "input":{"shape":"ListTableRowsRequest"}, + "output":{"shape":"ListTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

The ListTableRows API allows you to retrieve a list of all the rows in a table in a workbook.

" + }, + "ListTables":{ + "name":"ListTables", + "http":{ + "method":"GET", + "requestUri":"/workbooks/{workbookId}/tables" + }, + "input":{"shape":"ListTablesRequest"}, + "output":{"shape":"ListTablesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The ListTables API allows you to retrieve a list of all the tables in a workbook.

" + }, + "QueryTableRows":{ + "name":"QueryTableRows", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/rows/query" + }, + "input":{"shape":"QueryTableRowsRequest"}, + "output":{"shape":"QueryTableRowsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The QueryTableRows API allows you to use a filter formula to query for specific rows in a table.
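
A pagination sketch using placeholder ids and a hypothetical filter formula; the filterFormula member name on the request is an assumption about the generated model.

import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.Filter;
import software.amazon.awssdk.services.honeycode.model.QueryTableRowsRequest;
import software.amazon.awssdk.services.honeycode.model.QueryTableRowsResponse;

public class FindHighPriorityTasks {
    public static void main(String[] args) {
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            String nextToken = null;
            do {
                QueryTableRowsResponse page = honeycode.queryTableRows(QueryTableRowsRequest.builder()
                        .workbookId("<workbook-id>")
                        .tableId("<table-id>")
                        .filterFormula(Filter.builder()                      // assumed member name
                                .formula("=Filter(Tasks, \"Tasks[Priority]>2\")") // hypothetical formula
                                .build())
                        .maxResults(100)
                        .nextToken(nextToken)
                        .build());
                System.out.println("Rows in this page: " + page.rows().size());
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}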

" + }, + "StartTableDataImportJob":{ + "name":"StartTableDataImportJob", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/tables/{tableId}/import" + }, + "input":{"shape":"StartTableDataImportJobRequest"}, + "output":{"shape":"StartTableDataImportJobResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The StartTableDataImportJob API allows you to start an import job on a table. This API will only return the id of the job that was started. To find out the status of the import request, you need to call the DescribeTableDataImportJob API.
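
To tie the two operations together, a hedged sketch with placeholder ids and a placeholder source URL; the start-request member names (destinationTableId, dataFormat, dataSource, importOptions) and the jobId accessor are assumptions based on the operation's documented inputs and outputs.

import java.util.UUID;
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.DelimitedTextImportOptions;
import software.amazon.awssdk.services.honeycode.model.DescribeTableDataImportJobRequest;
import software.amazon.awssdk.services.honeycode.model.DescribeTableDataImportJobResponse;
import software.amazon.awssdk.services.honeycode.model.ImportDataSource;
import software.amazon.awssdk.services.honeycode.model.ImportDataSourceConfig;
import software.amazon.awssdk.services.honeycode.model.ImportOptions;
import software.amazon.awssdk.services.honeycode.model.ImportSourceDataFormat;
import software.amazon.awssdk.services.honeycode.model.StartTableDataImportJobRequest;

public class ImportCsv {
    public static void main(String[] args) throws InterruptedException {
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            String jobId = honeycode.startTableDataImportJob(StartTableDataImportJobRequest.builder()
                    .workbookId("<workbook-id>")
                    .destinationTableId("<table-id>")                   // assumed setter for the {tableId} path parameter
                    .dataFormat(ImportSourceDataFormat.DELIMITED_TEXT)
                    .dataSource(ImportDataSource.builder()
                            .dataSourceConfig(ImportDataSourceConfig.builder()
                                    .dataSourceUrl("https://example.com/tasks.csv") // placeholder source URL
                                    .build())
                            .build())
                    .importOptions(ImportOptions.builder()
                            .delimitedTextOptions(DelimitedTextImportOptions.builder()
                                    .delimiter(",")
                                    .hasHeaderRow(true)
                                    .ignoreEmptyRows(true)
                                    .build())
                            .build())
                    .clientRequestToken(UUID.randomUUID().toString())
                    .build())
                    .jobId();                                           // assumed accessor on the start response
            // StartTableDataImportJob only returns the job id; poll DescribeTableDataImportJob for progress.
            for (int i = 0; i < 10; i++) {
                DescribeTableDataImportJobResponse status = honeycode.describeTableDataImportJob(
                        DescribeTableDataImportJobRequest.builder()
                                .workbookId("<workbook-id>")
                                .tableId("<table-id>")
                                .jobId(jobId)
                                .build());
                System.out.println(status.jobStatus() + ": " + status.message());
                Thread.sleep(5_000);
            }
        }
    }
}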

" } }, "shapes":{ @@ -60,7 +250,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You do not have sufficient access to perform this action. Check that the workbook is owned by you and your IAM policy allows access to the screen/automation in the request.

", + "documentation":"

You do not have sufficient access to perform this action. Check that the workbook is owned by you and your IAM policy allows access to the resource in the request.

", "error":{"httpStatusCode":403}, "exception":true }, @@ -79,229 +269,963 @@ "message":{"shape":"ErrorMessage"} }, "documentation":"

The automation execution timed out.

", - "error":{"httpStatusCode":504}, + "error":{ + "httpStatusCode":504, + "senderFault":true + }, "exception":true }, - "ClientRequestToken":{ + "AwsUserArn":{ "type":"string", - "max":64, - "min":32 + "max":2048, + "min":20 }, - "ColumnMetadata":{ + "BatchCreateTableRowsRequest":{ "type":"structure", "required":[ - "name", - "format" + "workbookId", + "tableId", + "rowsToCreate" ], "members":{ - "name":{ - "shape":"Name", - "documentation":"

The name of the column.

" + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook where the new rows are being added.

If a workbook with the specified ID could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" }, - "format":{ - "shape":"Format", - "documentation":"

The format of the column.

" + "tableId":{ + "shape":"ResourceId", + "documentation":"

The ID of the table where the new rows are being added.

If a table with the specified ID could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" + }, + "rowsToCreate":{ + "shape":"CreateRowDataList", + "documentation":"

The list of rows to create at the end of the table. Each item in this list needs to have a batch item id to uniquely identify the element in the request and the cells to create for that row. You need to specify at least one item in this list.

Note that if one of the column ids in any of the rows in the request does not exist in the table, then the request fails and no updates are made to the table.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The request token for performing the batch create operation. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the operation again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" } - }, - "documentation":"

Metadata for column in the table.

" + } }, - "DataItem":{ + "BatchCreateTableRowsResult":{ "type":"structure", + "required":[ + "workbookCursor", + "createdRows" + ], "members":{ - "overrideFormat":{ - "shape":"Format", - "documentation":"

The overrideFormat is optional and is specified only if a particular row of data has a different format for the data than the default format defined on the screen or the table.

" + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

The updated workbook cursor after adding the new rows at the end of the table.

" }, - "rawValue":{ - "shape":"RawValue", - "documentation":"

The raw value of the data. e.g. jsmith@example.com

" + "createdRows":{ + "shape":"CreatedRowsMap", + "documentation":"

The map of batch item id to the row id that was created for that item.

" }, - "formattedValue":{ - "shape":"FormattedValue", - "documentation":"

The formatted value of the data. e.g. John Smith.

" + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

The list of batch items in the request that could not be added to the table. Each element in this list contains one item from the request that could not be added to the table along with the reason why that item could not be added.

" } - }, - "documentation":"

The data in a particular data cell defined on the screen.

", - "sensitive":true + } }, - "DataItems":{ - "type":"list", - "member":{"shape":"DataItem"} + "BatchDeleteTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "rowIds" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook where the rows are being deleted.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

The ID of the table where the rows are being deleted.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" + }, + "rowIds":{ + "shape":"RowIdList", + "documentation":"

The list of row ids to delete from the table. You need to specify at least one row id in this list.

Note that if one of the row ids provided in the request does not exist in the table, then the request fails and no rows are deleted from the table.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The request token for performing the delete action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" + } + } }, - "ErrorMessage":{"type":"string"}, - "Format":{ + "BatchDeleteTableRowsResult":{ + "type":"structure", + "required":["workbookCursor"], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

The updated workbook cursor after deleting the rows from the table.

" + }, + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

The list of row ids in the request that could not be deleted from the table. Each element in this list contains one row id from the request that could not be deleted along with the reason why that item could not be deleted.

" + } + } + }, + "BatchErrorMessage":{ "type":"string", - "enum":[ - "AUTO", - "NUMBER", - "CURRENCY", - "DATE", - "TIME", - "DATE_TIME", - "PERCENTAGE", - "TEXT", - "ACCOUNTING", - "CONTACT", - "ROWLINK" - ] + "pattern":"^(?!\\s*$).+" }, - "FormattedValue":{"type":"string"}, - "GetScreenDataRequest":{ + "BatchItemId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^(?!\\s*$).+" + }, + "BatchUpdateTableRowsRequest":{ "type":"structure", "required":[ "workbookId", - "appId", - "screenId" + "tableId", + "rowsToUpdate" ], "members":{ "workbookId":{ "shape":"ResourceId", - "documentation":"

The ID of the workbook that contains the screen.

" + "documentation":"

The ID of the workbook where the rows are being updated.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" }, - "appId":{ + "tableId":{ "shape":"ResourceId", - "documentation":"

The ID of the app that contains the screem.

" + "documentation":"

The ID of the table where the rows are being updated.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" }, - "screenId":{ + "rowsToUpdate":{ + "shape":"UpdateRowDataList", + "documentation":"

The list of rows to update in the table. Each item in this list needs to contain the row id to update along with the map of column id to cell values for each column in that row that needs to be updated. You need to specify at least one row in this list, and for each row, you need to specify at least one column to update.

Note that if one of the row or column ids in the request does not exist in the table, then the request fails and no updates are made to the table.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" + } + } + }, + "BatchUpdateTableRowsResult":{ + "type":"structure", + "required":["workbookCursor"], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

The updated workbook cursor after updating the rows in the table.

" + }, + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

The list of batch items in the request that could not be updated in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated.

" + } + } + }, + "BatchUpsertTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "rowsToUpsert" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook where the rows are being upserted.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

The ID of the table where the rows are being upserted.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" + }, + "rowsToUpsert":{ + "shape":"UpsertRowDataList", + "documentation":"

The list of rows to upsert in the table. Each item in this list needs to have a batch item id to uniquely identify the element in the request, a filter expression to find the rows to update for that element and the cell values to set for each column in the upserted rows. You need to specify at least one item in this list.

Note that if one of the filter formulas in the request fails to evaluate because of an error or one of the column ids in any of the rows does not exist in the table, then the request fails and no updates are made to the table.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" + } + } + }, + "BatchUpsertTableRowsResult":{ + "type":"structure", + "required":[ + "rows", + "workbookCursor" + ], + "members":{ + "rows":{ + "shape":"UpsertRowsResultMap", + "documentation":"

A map with the batch item id as the key and the result of the upsert operation as the value. The result of the upsert operation specifies whether existing rows were updated or a new row was appended, along with the list of row ids that were affected.

" + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

The updated workbook cursor after updating or appending rows in the table.

" + }, + "failedBatchItems":{ + "shape":"FailedBatchItems", + "documentation":"

The list of batch items in the request that could not be updated or appended in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated or appended.

" + } + } + }, + "Cell":{ + "type":"structure", + "members":{ + "formula":{ + "shape":"Formula", + "documentation":"

The formula contained in the cell. This field is empty if a cell does not have a formula.

" + }, + "format":{ + "shape":"Format", + "documentation":"

The format of the cell. If this field is empty, then the format is either not specified in the workbook or the format is set to AUTO.

" + }, + "rawValue":{ + "shape":"RawValue", + "documentation":"

The raw value of the data contained in the cell. The raw value depends on the format of the data in the cell. However the attribute in the API return value is always a string containing the raw value.

Cells with format DATE, DATE_TIME or TIME have the raw value as a floating point number where the whole number represents the number of days since 1/1/1900 and the fractional part represents the fraction of the day since midnight. For example, a cell with date 11/3/2020 has the raw value \"44138\". A cell with the time 9:00 AM has the raw value \"0.375\" and a cell with date/time value of 11/3/2020 9:00 AM has the raw value \"44138.375\". Notice that even though the raw value is a number in all three cases, it is still represented as a string.

Cells with format NUMBER, CURRENCY, PERCENTAGE and ACCOUNTING have the raw value of the data as the number representing the data being displayed. For example, the number 1.325 with two decimal places in the format will have its raw value as \"1.325\" and formatted value as \"1.33\". A currency value for $10 will have the raw value as \"10\" and formatted value as \"$10.00\". A value representing 20% with two decimal places in the format will have its raw value as \"0.2\" and the formatted value as \"20.00%\". An accounting value of -$25 will have \"-25\" as the raw value and \"$ (25.00)\" as the formatted value.

Cells with format TEXT will have the raw text as the raw value. For example, a cell with text \"John Smith\" will have \"John Smith\" as both the raw value and the formatted value.

Cells with format CONTACT will have the name of the contact as a formatted value and the email address of the contact as the raw value. For example, a contact for John Smith will have \"John Smith\" as the formatted value and \"john.smith@example.com\" as the raw value.

Cells with format ROWLINK (aka picklist) will have the first column of the linked row as the formatted value and the row id of the linked row as the raw value. For example, a cell containing a picklist to a table that displays task status might have \"Completed\" as the formatted value and \"row:dfcefaee-5b37-4355-8f28-40c3e4ff5dd4/ca432b2f-b8eb-431d-9fb5-cbe0342f9f03\" as the raw value.

Cells with format AUTO or cells without any format that are auto-detected as one of the formats above will contain the raw and formatted values as mentioned above, based on the auto-detected formats. If there is no auto-detected format, the raw and formatted values will be the same as the data in the cell.
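
As a worked example of the date/time encoding above, the raw value \"44138.375\" (11/3/2020 9:00 AM) can be decoded in plain Java. The 1899-12-30 epoch used here is inferred from the 44138 -> 11/3/2020 example rather than stated by the API, so treat it as an assumption to verify against your own data.

import java.time.LocalDate;
import java.time.LocalDateTime;

public class DecodeRawDateTime {
    public static void main(String[] args) {
        double serial = Double.parseDouble("44138.375");   // raw value of a DATE_TIME cell
        long wholeDays = (long) Math.floor(serial);        // 44138 -> the date part
        long secondsIntoDay = Math.round((serial - wholeDays) * 86_400); // 0.375 of a day -> 09:00:00
        LocalDateTime value = LocalDate.of(1899, 12, 30)   // assumed epoch consistent with 44138 == 2020-11-03
                .plusDays(wholeDays)
                .atStartOfDay()
                .plusSeconds(secondsIntoDay);
        System.out.println(value);                         // 2020-11-03T09:00
    }
}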

" + }, + "formattedValue":{ + "shape":"FormattedValue", + "documentation":"

The formatted value of the cell. This is the value that you see displayed in the cell in the UI.

Note that the formatted value of a cell is always represented as a string irrespective of the data that is stored in the cell. For example, if a cell contains a date, the formatted value of the cell is the string representation of the formatted date being shown in the cell in the UI. See details in the rawValue field below for how cells of different formats will have different raw and formatted values.

" + } + }, + "documentation":"

An object that represents a single cell in a table.

", + "sensitive":true + }, + "CellInput":{ + "type":"structure", + "members":{ + "fact":{ + "shape":"Fact", + "documentation":"

Fact represents the data that is entered into a cell. This data can be free text or a formula. Formulas need to start with the equals (=) sign.

" + } + }, + "documentation":"

CellInput object contains the data needed to create or update cells in a table.

" + }, + "Cells":{ + "type":"list", + "member":{"shape":"Cell"} + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":32, + "pattern":"^(?!\\s*$).+" + }, + "ColumnMetadata":{ + "type":"structure", + "required":[ + "name", + "format" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the column.

" + }, + "format":{ + "shape":"Format", + "documentation":"

The format of the column.

" + } + }, + "documentation":"

Metadata for column in the table.

" + }, + "CreateRowData":{ + "type":"structure", + "required":[ + "batchItemId", + "cellsToCreate" + ], + "members":{ + "batchItemId":{ + "shape":"BatchItemId", + "documentation":"

An external identifier that represents the single row that is being created as part of the BatchCreateTableRows request. This can be any string that you can use to identify the row in the request. The BatchCreateTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results.

" + }, + "cellsToCreate":{ + "shape":"RowDataInput", + "documentation":"

A map representing the cells to create in the new row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

" + } + }, + "documentation":"

Data needed to create a single row in a table as part of the BatchCreateTableRows request.

" + }, + "CreateRowDataList":{ + "type":"list", + "member":{"shape":"CreateRowData"}, + "max":100, + "min":1 + }, + "CreatedRowsMap":{ + "type":"map", + "key":{"shape":"BatchItemId"}, + "value":{"shape":"RowId"} + }, + "DataItem":{ + "type":"structure", + "members":{ + "overrideFormat":{ + "shape":"Format", + "documentation":"

The overrideFormat is optional and is specified only if a particular row of data has a different format for the data than the default format defined on the screen or the table.

" + }, + "rawValue":{ + "shape":"RawValue", + "documentation":"

The raw value of the data. e.g. jsmith@example.com

" + }, + "formattedValue":{ + "shape":"FormattedValue", + "documentation":"

The formatted value of the data. e.g. John Smith.

" + } + }, + "documentation":"

The data in a particular data cell defined on the screen.

", + "sensitive":true + }, + "DataItems":{ + "type":"list", + "member":{"shape":"DataItem"} + }, + "DelimitedTextDelimiter":{ + "type":"string", + "max":1, + "min":1, + "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]?$" + }, + "DelimitedTextImportOptions":{ + "type":"structure", + "required":["delimiter"], + "members":{ + "delimiter":{ + "shape":"DelimitedTextDelimiter", + "documentation":"

The delimiter to use for separating columns in a single row of the input.

" + }, + "hasHeaderRow":{ + "shape":"HasHeaderRow", + "documentation":"

Indicates whether the input file has a header row at the top containing the column names.

" + }, + "ignoreEmptyRows":{ + "shape":"IgnoreEmptyRows", + "documentation":"

A parameter to indicate whether empty rows should be ignored or be included in the import.

" + }, + "dataCharacterEncoding":{ + "shape":"ImportDataCharacterEncoding", + "documentation":"

The encoding of the data in the input file.

" + } + }, + "documentation":"

An object that contains the options relating to parsing delimited text as part of an import request.

" + }, + "DescribeTableDataImportJobRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId", + "jobId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook into which data was imported.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

The ID of the table into which data was imported.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" + }, + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job that was returned by the StartTableDataImportJob request.

If a job with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"jobId" + } + } + }, + "DescribeTableDataImportJobResult":{ + "type":"structure", + "required":[ + "jobStatus", + "message", + "jobMetadata" + ], + "members":{ + "jobStatus":{ + "shape":"TableDataImportJobStatus", + "documentation":"

The current status of the import job.

" + }, + "message":{ + "shape":"TableDataImportJobMessage", + "documentation":"

A message providing more details about the current status of the import job.

" + }, + "jobMetadata":{ + "shape":"TableDataImportJobMetadata", + "documentation":"

The metadata about the job that was submitted for import.

" + } + } + }, + "DestinationOptions":{ + "type":"structure", + "members":{ + "columnMap":{ + "shape":"ImportColumnMap", + "documentation":"

A map of the column id to the import properties for each column.

" + } + }, + "documentation":"

An object that contains the options relating to the destination of the import request.

" + }, + "Email":{ + "type":"string", + "max":254, + "min":3, + "pattern":"^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$", + "sensitive":true + }, + "ErrorMessage":{"type":"string"}, + "Fact":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"[\\s\\S]*", + "sensitive":true + }, + "FailedBatchItem":{ + "type":"structure", + "required":[ + "id", + "errorMessage" + ], + "members":{ + "id":{ + "shape":"BatchItemId", + "documentation":"

The id of the batch item that failed. This is the batch item id for the BatchCreateTableRows and BatchUpsertTableRows operations and the row id for the BatchUpdateTableRows and BatchDeleteTableRows operations.

" + }, + "errorMessage":{ + "shape":"BatchErrorMessage", + "documentation":"

The error message that indicates why the batch item failed.

" + } + }, + "documentation":"

A single item in a batch that failed to perform the intended action because of an error preventing it from succeeding.

" + }, + "FailedBatchItems":{ + "type":"list", + "member":{"shape":"FailedBatchItem"}, + "max":100, + "min":0 + }, + "Filter":{ + "type":"structure", + "required":["formula"], + "members":{ + "formula":{ + "shape":"Formula", + "documentation":"

A formula representing a filter function that returns zero or more matching rows from a table. Valid formulas in this field return a list of rows from a table. The most common ways of writing a formula to return a list of rows are to use the FindRow() or Filter() functions. Any other formula that returns zero or more rows is also acceptable. For example, you can use a formula that points to a cell that contains a filter function.

" + }, + "contextRowId":{ + "shape":"RowId", + "documentation":"

The optional contextRowId attribute can be used to specify the row id of the context row if the filter formula contains unqualified references to table columns and needs a context row to evaluate them successfully.

" + } + }, + "documentation":"

An object that represents a filter formula along with the id of the context row under which the filter function needs to evaluate.

" + }, + "Format":{ + "type":"string", + "enum":[ + "AUTO", + "NUMBER", + "CURRENCY", + "DATE", + "TIME", + "DATE_TIME", + "PERCENTAGE", + "TEXT", + "ACCOUNTING", + "CONTACT", + "ROWLINK" + ] + }, + "FormattedValue":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"[\\s\\S]*" + }, + "Formula":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"^=.*", + "sensitive":true + }, + "GetScreenDataRequest":{ + "type":"structure", + "required":[ + "workbookId", + "appId", + "screenId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook that contains the screen.

" + }, + "appId":{ + "shape":"ResourceId", + "documentation":"

The ID of the app that contains the screen.

" + }, + "screenId":{ + "shape":"ResourceId", + "documentation":"

The ID of the screen.

" + }, + "variables":{ + "shape":"VariableValueMap", + "documentation":"

Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The number of results to be returned on a single page. Specify a number between 1 and 100. The maximum value is 100.

This parameter is optional. If you don't specify this parameter, the default page size is 100.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.
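
A pagination sketch with placeholder ids that keeps requesting pages until no nextToken is returned; it only prints the block names that key the results map.

import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.GetScreenDataRequest;
import software.amazon.awssdk.services.honeycode.model.GetScreenDataResponse;

public class ReadScreen {
    public static void main(String[] args) {
        try (HoneycodeClient honeycode = HoneycodeClient.create()) {
            String nextToken = null;
            do {
                GetScreenDataResponse page = honeycode.getScreenData(GetScreenDataRequest.builder()
                        .workbookId("<workbook-id>")
                        .appId("<app-id>")
                        .screenId("<screen-id>")
                        .maxResults(100)
                        .nextToken(nextToken)
                        .build());
                page.results().keySet().forEach(block -> System.out.println("block: " + block));
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}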

" + } + } + }, + "GetScreenDataResult":{ + "type":"structure", + "required":[ + "results", + "workbookCursor" + ], + "members":{ + "results":{ + "shape":"ResultSetMap", + "documentation":"

A map of all the rows on the screen keyed by block name.

" + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

Indicates the cursor of the workbook at which the data returned by this workbook is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the query has been loaded.

" + } + } + }, + "HasHeaderRow":{"type":"boolean"}, + "IgnoreEmptyRows":{"type":"boolean"}, + "ImportColumnMap":{ + "type":"map", + "key":{"shape":"ResourceId"}, + "value":{"shape":"SourceDataColumnProperties"}, + "max":100 + }, + "ImportDataCharacterEncoding":{ + "type":"string", + "enum":[ + "UTF-8", + "US-ASCII", + "ISO-8859-1", + "UTF-16BE", + "UTF-16LE", + "UTF-16" + ] + }, + "ImportDataSource":{ + "type":"structure", + "required":["dataSourceConfig"], + "members":{ + "dataSourceConfig":{ + "shape":"ImportDataSourceConfig", + "documentation":"

The configuration parameters for the data source of the import.

" + } + }, + "documentation":"

An object that has details about the source of the data that was submitted for import.

" + }, + "ImportDataSourceConfig":{ + "type":"structure", + "members":{ + "dataSourceUrl":{ + "shape":"SecureURL", + "documentation":"

The URL from which source data will be downloaded for the import request.

" + } + }, + "documentation":"

An object that contains the configuration parameters for the data source of an import request.

" + }, + "ImportJobSubmitter":{ + "type":"structure", + "members":{ + "email":{ + "shape":"Email", + "documentation":"

The email id of the submitter of the import job, if available.

" + }, + "userArn":{ + "shape":"AwsUserArn", + "documentation":"

The AWS user ARN of the submitter of the import job, if available.

" + } + }, + "documentation":"

An object that contains the attributes of the submitter of the import job.

" + }, + "ImportOptions":{ + "type":"structure", + "members":{ + "destinationOptions":{ + "shape":"DestinationOptions", + "documentation":"

Options relating to the destination of the import request.

" + }, + "delimitedTextOptions":{ + "shape":"DelimitedTextImportOptions", + "documentation":"

Options relating to parsing delimited text. Required if dataFormat is DELIMITED_TEXT.

" + } + }, + "documentation":"

An object that contains the options specified by the submitter of the import request.

" + }, + "ImportSourceDataFormat":{ + "type":"string", + "enum":["DELIMITED_TEXT"] + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

There were unexpected errors from the server.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvokeScreenAutomationRequest":{ + "type":"structure", + "required":[ + "workbookId", + "appId", + "screenId", + "screenAutomationId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook that contains the screen automation.

", + "location":"uri", + "locationName":"workbookId" + }, + "appId":{ + "shape":"ResourceId", + "documentation":"

The ID of the app that contains the screen automation.

", + "location":"uri", + "locationName":"appId" + }, + "screenId":{ + "shape":"ResourceId", + "documentation":"

The ID of the screen that contains the screen automation.

", + "location":"uri", + "locationName":"screenId" + }, + "screenAutomationId":{ + "shape":"ResourceId", + "documentation":"

The ID of the automation action to be performed.

", + "location":"uri", + "locationName":"automationId" + }, + "variables":{ + "shape":"VariableValueMap", + "documentation":"

Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen. Any variables defined in a screen are required to be passed in the call.

" + }, + "rowId":{ + "shape":"RowId", + "documentation":"

The row ID for the automation if the automation is defined inside a block with source or list.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The request token for performing the automation action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will return the response of the previous call rather than performing the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.
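
A minimal sketch of how the request token is intended to be used with the generated Java v2 client, assuming the standard codegen naming for the members listed above: generate one token per logical action and reuse it when retrying a transient failure.

import java.util.UUID;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.InvokeScreenAutomationRequest;
import software.amazon.awssdk.services.honeycode.model.InvokeScreenAutomationResponse;

public class InvokeAutomationWithRetry {
    public static InvokeScreenAutomationResponse invoke(HoneycodeClient honeycode,
                                                        String workbookId, String appId,
                                                        String screenId, String automationId) {
        // One token per logical action; reusing it on retry keeps the call idempotent
        // for the few minutes the token stays valid.
        String token = UUID.randomUUID().toString();
        InvokeScreenAutomationRequest request = InvokeScreenAutomationRequest.builder()
                .workbookId(workbookId)
                .appId(appId)
                .screenId(screenId)
                .screenAutomationId(automationId)
                .clientRequestToken(token)
                .build();
        try {
            return honeycode.invokeScreenAutomation(request);
        } catch (SdkClientException transientFailure) {
            // Retrying with the same token returns the result of the first successful call
            // instead of running the automation twice.
            return honeycode.invokeScreenAutomation(request);
        }
    }
}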

" + } + } + }, + "InvokeScreenAutomationResult":{ + "type":"structure", + "required":["workbookCursor"], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

The updated workbook cursor after performing the automation action.

" + } + } + }, + "JobId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" + }, + "ListTableColumnsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook that contains the table whose columns are being retrieved.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

The ID of the table whose columns are being retrieved.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListTableColumnsResult":{ + "type":"structure", + "required":["tableColumns"], + "members":{ + "tableColumns":{ + "shape":"TableColumns", + "documentation":"

The list of columns in the table.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" + } + } + }, + "ListTableRowsRequest":{ + "type":"structure", + "required":[ + "workbookId", + "tableId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook that contains the table whose rows are being retrieved.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" + }, + "tableId":{ + "shape":"ResourceId", + "documentation":"

The ID of the table whose rows are being retrieved.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" + }, + "rowIds":{ + "shape":"RowIdList", + "documentation":"

This parameter is optional. If one or more row ids are specified in this list, then only the specified row ids are returned in the result. If no row ids are specified here, then all the rows in the table are returned.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of rows to return in each page of the results.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.
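
A minimal pagination loop against ListTableRows with the generated Java v2 client might look like the following; method and member names assume the standard codegen mapping, and the IDs are placeholders.

import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.ListTableRowsRequest;
import software.amazon.awssdk.services.honeycode.model.ListTableRowsResponse;
import software.amazon.awssdk.services.honeycode.model.TableRow;

public class ListAllRows {
    public static void printAllRows(HoneycodeClient honeycode, String workbookId, String tableId) {
        String nextToken = null;
        do {
            ListTableRowsResponse page = honeycode.listTableRows(ListTableRowsRequest.builder()
                    .workbookId(workbookId)
                    .tableId(tableId)
                    .maxResults(100)
                    .nextToken(nextToken)   // null on the first call returns the first page
                    .build());
            for (TableRow row : page.rows()) {
                System.out.println(row.rowId() + " -> " + row.cells());
            }
            nextToken = page.nextToken();   // absent once all matching rows have been returned
        } while (nextToken != null);
    }
}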

" + } + } + }, + "ListTableRowsResult":{ + "type":"structure", + "required":[ + "columnIds", + "rows", + "workbookCursor" + ], + "members":{ + "columnIds":{ + "shape":"ResourceIds", + "documentation":"

The list of columns in the table whose row data is returned in the result.

" + }, + "rows":{ + "shape":"TableRows", + "documentation":"

The list of rows in the table. Note that this result is paginated, so this list contains a maximum of 100 rows.

" + }, + "rowIdsNotFound":{ + "shape":"RowIdList", + "documentation":"

The list of row ids included in the request that were not found in the table.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" + } + } + }, + "ListTablesRequest":{ + "type":"structure", + "required":["workbookId"], + "members":{ + "workbookId":{ "shape":"ResourceId", - "documentation":"

The ID of the screen.

" - }, - "variables":{ - "shape":"VariableValueMap", - "documentation":"

Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen.

" + "documentation":"

The ID of the workbook whose tables are being retrieved.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The number of results to be returned on a single page. Specify a number between 1 and 100. The maximum value is 100.

This parameter is optional. If you don't specify this parameter, the default page size is 100.

" + "documentation":"

The maximum number of tables to return in each page of the results.

", + "location":"querystring", + "locationName":"maxResults" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

" + "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

", + "location":"querystring", + "locationName":"nextToken" } } }, - "GetScreenDataResult":{ + "ListTablesResult":{ "type":"structure", - "required":[ - "results", - "workbookCursor" - ], + "required":["tables"], "members":{ - "results":{ - "shape":"ResultSetMap", - "documentation":"

A map of all the rows on the screen keyed by block name.

" - }, - "workbookCursor":{ - "shape":"WorkbookCursor", - "documentation":"

Indicates the cursor of the workbook at which the data returned by this workbook is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" + "tables":{ + "shape":"Tables", + "documentation":"

The list of tables in the workbook.

" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the query has been loaded.

" + "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" } } }, - "InternalServerException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"} - }, - "documentation":"

There were unexpected errors from the server.

", - "error":{"httpStatusCode":500}, - "exception":true + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 }, - "InvokeScreenAutomationRequest":{ + "Name":{ + "type":"string", + "sensitive":true + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^(?!\\s*$).+" + }, + "QueryTableRowsRequest":{ "type":"structure", "required":[ "workbookId", - "appId", - "screenId", - "screenAutomationId" + "tableId", + "filterFormula" ], "members":{ "workbookId":{ "shape":"ResourceId", - "documentation":"

The ID of the workbook that contains the screen automation.

", + "documentation":"

The ID of the workbook whose table rows are being queried.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", "location":"uri", "locationName":"workbookId" }, - "appId":{ - "shape":"ResourceId", - "documentation":"

The ID of the app that contains the screen automation.

", - "location":"uri", - "locationName":"appId" - }, - "screenId":{ - "shape":"ResourceId", - "documentation":"

The ID of the screen that contains the screen automation.

", - "location":"uri", - "locationName":"screenId" - }, - "screenAutomationId":{ + "tableId":{ "shape":"ResourceId", - "documentation":"

The ID of the automation action to be performed.

", + "documentation":"

The ID of the table whose rows are being queried.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", "location":"uri", - "locationName":"automationId" + "locationName":"tableId" }, - "variables":{ - "shape":"VariableValueMap", - "documentation":"

Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen.

" + "filterFormula":{ + "shape":"Filter", + "documentation":"

An object that represents a filter formula along with the id of the context row under which the filter function needs to evaluate.

" }, - "rowId":{ - "shape":"RowId", - "documentation":"

The row ID for the automation if the automation is defined inside a block with source or list.

" + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of rows to return in each page of the results.

" }, - "clientRequestToken":{ - "shape":"ClientRequestToken", - "documentation":"

The request token for performing the automation action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will return the response of the previous call rather than performing the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

" } } }, - "InvokeScreenAutomationResult":{ + "QueryTableRowsResult":{ "type":"structure", - "required":["workbookCursor"], + "required":[ + "columnIds", + "rows", + "workbookCursor" + ], "members":{ + "columnIds":{ + "shape":"ResourceIds", + "documentation":"

The list of columns in the table whose row data is returned in the result.

" + }, + "rows":{ + "shape":"TableRows", + "documentation":"

The list of rows in the table that match the query filter.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded.

" + }, "workbookCursor":{ "shape":"WorkbookCursor", - "documentation":"

The updated workbook cursor after performing the automation action.

" + "documentation":"

Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" } } }, - "MaxResults":{ - "type":"integer", - "box":true, - "max":100, - "min":1 - }, - "Name":{ - "type":"string", - "sensitive":true - }, - "PaginationToken":{ + "RawValue":{ "type":"string", - "max":1024, - "min":1 + "max":32767, + "min":0, + "pattern":"[\\s\\S]*" }, - "RawValue":{"type":"string"}, "RequestTimeoutException":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"} }, "documentation":"

The request timed out.

", - "error":{"httpStatusCode":504}, + "error":{ + "httpStatusCode":504, + "senderFault":true + }, "exception":true }, "ResourceId":{ "type":"string", + "max":36, + "min":36, "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" }, + "ResourceIds":{ + "type":"list", + "member":{"shape":"ResourceId"}, + "max":100, + "min":1 + }, "ResourceNotFoundException":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

A Workbook, App, Screen or Screen Automation was not found with the given ID.

", + "documentation":"

A Workbook, Table, App, Screen or Screen Automation was not found with the given ID.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -351,10 +1275,41 @@ "key":{"shape":"Name"}, "value":{"shape":"ResultSet"} }, + "RowDataInput":{ + "type":"map", + "key":{"shape":"ResourceId"}, + "value":{"shape":"CellInput"}, + "max":100, + "min":1 + }, "RowId":{ "type":"string", + "max":77, + "min":77, "pattern":"row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" }, + "RowIdList":{ + "type":"list", + "member":{"shape":"RowId"}, + "max":100, + "min":1 + }, + "SecureURL":{ + "type":"string", + "max":8000, + "min":1, + "pattern":"^https:\\/\\/[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request caused the service quota to be breached.

", + "error":{"httpStatusCode":402}, + "exception":true + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -364,6 +1319,180 @@ "error":{"httpStatusCode":503}, "exception":true }, + "SourceDataColumnIndex":{ + "type":"integer", + "min":1 + }, + "SourceDataColumnProperties":{ + "type":"structure", + "members":{ + "columnIndex":{ + "shape":"SourceDataColumnIndex", + "documentation":"

The index of the column in the input file.

" + } + }, + "documentation":"

An object that contains the properties for importing data to a specific column in a table.

" + }, + "StartTableDataImportJobRequest":{ + "type":"structure", + "required":[ + "workbookId", + "dataSource", + "dataFormat", + "destinationTableId", + "importOptions", + "clientRequestToken" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook where the rows are being imported.

If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"workbookId" + }, + "dataSource":{ + "shape":"ImportDataSource", + "documentation":"

The source of the data that is being imported. The size of the source must be no larger than 100 MB, and the source must have no more than 100,000 cells and no more than 1,000 rows.

" + }, + "dataFormat":{ + "shape":"ImportSourceDataFormat", + "documentation":"

The format of the data that is being imported. Currently the only option supported is \"DELIMITED_TEXT\".

" + }, + "destinationTableId":{ + "shape":"ResourceId", + "documentation":"

The ID of the table where the rows are being imported.

If a table with the specified id could not be found, this API throws ResourceNotFoundException.

", + "location":"uri", + "locationName":"tableId" + }, + "importOptions":{ + "shape":"ImportOptions", + "documentation":"

The options for customizing this import request.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.
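
Assembling the members above into a hypothetical import request could look roughly like this. The DelimitedTextImportOptions member names (delimiter, hasHeaderRow) are assumptions, since that shape's members are not shown in this hunk; everything else follows the members documented above.

import java.util.UUID;
import software.amazon.awssdk.services.honeycode.HoneycodeClient;
import software.amazon.awssdk.services.honeycode.model.DelimitedTextImportOptions;
import software.amazon.awssdk.services.honeycode.model.ImportDataSource;
import software.amazon.awssdk.services.honeycode.model.ImportDataSourceConfig;
import software.amazon.awssdk.services.honeycode.model.ImportOptions;
import software.amazon.awssdk.services.honeycode.model.StartTableDataImportJobRequest;
import software.amazon.awssdk.services.honeycode.model.StartTableDataImportJobResponse;

public class ImportCsvExample {
    public static String startImport(HoneycodeClient honeycode, String workbookId,
                                     String tableId, String csvUrl) {
        StartTableDataImportJobRequest request = StartTableDataImportJobRequest.builder()
                .workbookId(workbookId)
                .destinationTableId(tableId)
                .dataFormat("DELIMITED_TEXT")
                .dataSource(ImportDataSource.builder()
                        .dataSourceConfig(ImportDataSourceConfig.builder()
                                .dataSourceUrl(csvUrl)   // must be an https URL
                                .build())
                        .build())
                .importOptions(ImportOptions.builder()
                        .delimitedTextOptions(DelimitedTextImportOptions.builder()
                                .delimiter(",")          // member names assumed; not shown in this hunk
                                .hasHeaderRow(true)
                                .build())
                        .build())
                .clientRequestToken(UUID.randomUUID().toString())
                .build();
        StartTableDataImportJobResponse response = honeycode.startTableDataImportJob(request);
        return response.jobId();   // poll the job status with this id
    }
}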

" + } + } + }, + "StartTableDataImportJobResult":{ + "type":"structure", + "required":[ + "jobId", + "jobStatus" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The id that is assigned to this import job. Pass this id in later requests that check the status of this import job.

" + }, + "jobStatus":{ + "shape":"TableDataImportJobStatus", + "documentation":"

The status of the import job immediately after submitting the request.

" + } + } + }, + "Table":{ + "type":"structure", + "members":{ + "tableId":{ + "shape":"ResourceId", + "documentation":"

The id of the table.

" + }, + "tableName":{ + "shape":"TableName", + "documentation":"

The name of the table.

" + } + }, + "documentation":"

An object representing the properties of a table in a workbook.

" + }, + "TableColumn":{ + "type":"structure", + "members":{ + "tableColumnId":{ + "shape":"ResourceId", + "documentation":"

The id of the column in the table.

" + }, + "tableColumnName":{ + "shape":"TableColumnName", + "documentation":"

The name of the column in the table.

" + }, + "format":{ + "shape":"Format", + "documentation":"

The column level format that is applied in the table. An empty value in this field means that the column format is the default value 'AUTO'.

" + } + }, + "documentation":"

An object that contains attributes about a single column in a table.

" + }, + "TableColumnName":{"type":"string"}, + "TableColumns":{ + "type":"list", + "member":{"shape":"TableColumn"} + }, + "TableDataImportJobMessage":{"type":"string"}, + "TableDataImportJobMetadata":{ + "type":"structure", + "required":[ + "submitter", + "submitTime", + "importOptions", + "dataSource" + ], + "members":{ + "submitter":{ + "shape":"ImportJobSubmitter", + "documentation":"

Details about the submitter of the import request.

" + }, + "submitTime":{ + "shape":"TimestampInMillis", + "documentation":"

The timestamp when the job was submitted for import.

" + }, + "importOptions":{ + "shape":"ImportOptions", + "documentation":"

The options that were specified at the time of submitting the import request.

" + }, + "dataSource":{ + "shape":"ImportDataSource", + "documentation":"

The source of the data that was submitted for import.

" + } + }, + "documentation":"

The metadata associated with the table data import job that was submitted.

" + }, + "TableDataImportJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, + "TableName":{"type":"string"}, + "TableRow":{ + "type":"structure", + "required":[ + "rowId", + "cells" + ], + "members":{ + "rowId":{ + "shape":"RowId", + "documentation":"

The id of the row in the table.

" + }, + "cells":{ + "shape":"Cells", + "documentation":"

A list of cells in the table row. The cells appear in the same order as the columns of the table.

" + } + }, + "documentation":"

An object that contains attributes about a single row in a table.

" + }, + "TableRows":{ + "type":"list", + "member":{"shape":"TableRow"} + }, + "Tables":{ + "type":"list", + "member":{"shape":"Table"} + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -373,6 +1502,88 @@ "error":{"httpStatusCode":429}, "exception":true }, + "TimestampInMillis":{"type":"timestamp"}, + "UpdateRowData":{ + "type":"structure", + "required":[ + "rowId", + "cellsToUpdate" + ], + "members":{ + "rowId":{ + "shape":"RowId", + "documentation":"

The id of the row that needs to be updated.

" + }, + "cellsToUpdate":{ + "shape":"RowDataInput", + "documentation":"

A map representing the cells to update in the given row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

" + } + }, + "documentation":"

Data needed to update a single row in a table as part of the BatchUpdateTableRows request.

" + }, + "UpdateRowDataList":{ + "type":"list", + "member":{"shape":"UpdateRowData"}, + "max":100, + "min":1 + }, + "UpsertAction":{ + "type":"string", + "enum":[ + "UPDATED", + "APPENDED" + ] + }, + "UpsertRowData":{ + "type":"structure", + "required":[ + "batchItemId", + "filter", + "cellsToUpdate" + ], + "members":{ + "batchItemId":{ + "shape":"BatchItemId", + "documentation":"

An external identifier that represents a single item in the request that is being upserted as part of the BatchUpsertTableRows request. This can be any string that you can use to identify the item in the request. The BatchUpsertTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results.

" + }, + "filter":{ + "shape":"Filter", + "documentation":"

The filter formula to use to find existing matching rows to update. The formula needs to return zero or more rows. If the formula returns 0 rows, then a new row will be appended to the target table. If the formula returns one or more rows, then the returned rows will be updated.

Note that the filter formula needs to return rows from the target table for the upsert operation to succeed. If the filter formula has a syntax error or it doesn't evaluate to zero or more rows in the target table for any one item in the input list, then the entire BatchUpsertTableRows request fails and no updates are made to the table.
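
To make the append-or-update behaviour concrete, a single BatchUpsertTableRows item could be assembled roughly as below. The Filter and CellInput member names (formula, fact) are assumptions, since those shapes are not part of this hunk, and the filter formula itself is only illustrative.

import java.util.Map;
import software.amazon.awssdk.services.honeycode.model.CellInput;
import software.amazon.awssdk.services.honeycode.model.Filter;
import software.amazon.awssdk.services.honeycode.model.UpsertRowData;

public class UpsertItemExample {
    public static UpsertRowData buildItem(String orderIdColumnId, String statusColumnId) {
        return UpsertRowData.builder()
                .batchItemId("order-1042")                       // echoed back in the result map
                .filter(Filter.builder()
                        // Assumed member name "formula"; the formula selects existing rows to update.
                        // Zero matches -> a new row is appended; one or more -> those rows are updated.
                        .formula("=FILTER(Orders,\"Orders[OrderId]=%\",\"1042\")")
                        .build())
                .cellsToUpdate(Map.of(
                        // Assumed CellInput member name "fact" holding the raw cell value.
                        orderIdColumnId, CellInput.builder().fact("1042").build(),
                        statusColumnId, CellInput.builder().fact("SHIPPED").build()))
                .build();
    }
}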

" + }, + "cellsToUpdate":{ + "shape":"RowDataInput", + "documentation":"

A map representing the cells to update for the matching rows or an appended row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell.

" + } + }, + "documentation":"

Data needed to upsert rows in a table as part of a single item in the BatchUpsertTableRows request.

" + }, + "UpsertRowDataList":{ + "type":"list", + "member":{"shape":"UpsertRowData"} + }, + "UpsertRowsResult":{ + "type":"structure", + "required":[ + "rowIds", + "upsertAction" + ], + "members":{ + "rowIds":{ + "shape":"RowIdList", + "documentation":"

The list of row ids that were changed as part of an upsert row operation. If the upsert resulted in an update, this list could potentially contain multiple rows that matched the filter and hence got updated. If the upsert resulted in an append, this list would only have the single row that was appended.

" + }, + "upsertAction":{ + "shape":"UpsertAction", + "documentation":"

The result of the upsert action.

" + } + }, + "documentation":"

An object that represents the result of a single upsert row request.

" + }, + "UpsertRowsResultMap":{ + "type":"map", + "key":{"shape":"BatchItemId"}, + "value":{"shape":"UpsertRowsResult"} + }, "ValidationException":{ "type":"structure", "required":["message"], @@ -385,6 +1596,7 @@ }, "VariableName":{ "type":"string", + "pattern":"^(?!\\s*$).+", "sensitive":true }, "VariableValue":{ diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 3068e5d65c7c..d8a58ceec580 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 4aaf3619a799..4412070880e4 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index ea6fd1a64435..12ba09bc6171 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json index 4ba72b173d63..0df5d37db0d0 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json @@ -833,7 +833,7 @@ "AccountList":{ "type":"list", "member":{"shape":"AccountId"}, - "max":50, + "max":1536, "min":1 }, "Ami":{ @@ -853,7 +853,7 @@ }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the EC2 AMI.

" + "documentation":"

The description of the EC2 AMI. Minimum and maximum length are in characters.

" }, "state":{"shape":"ImageState"}, "accountId":{ @@ -872,7 +872,7 @@ }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the distribution configuration.

" + "documentation":"

The description of the distribution configuration. Minimum and maximum length are in characters.

" }, "targetAccountIds":{ "shape":"AccountList", @@ -2868,7 +2868,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -2885,7 +2885,7 @@ "documentation":"

The list of component summaries for the specified semantic version.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -2907,7 +2907,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -2924,7 +2924,7 @@ "documentation":"

The list of component semantic versions.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -2942,7 +2942,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -2959,7 +2959,7 @@ "documentation":"

The list of distributions.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -2982,7 +2982,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -2999,7 +2999,7 @@ "documentation":"

The list of image build versions.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -3022,7 +3022,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -3039,7 +3039,7 @@ "documentation":"

The list of images built by this pipeline.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -3057,7 +3057,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -3074,7 +3074,7 @@ "documentation":"

The list of image pipelines.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -3096,7 +3096,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -3113,7 +3113,7 @@ "documentation":"

The list of image pipelines.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -3135,7 +3135,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -3152,7 +3152,7 @@ "documentation":"

The list of image semantic versions.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -3170,7 +3170,7 @@ "box":true }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } @@ -3187,7 +3187,7 @@ "documentation":"

The list of infrastructure configurations.

" }, "nextToken":{ - "shape":"NonEmptyString", + "shape":"PaginationToken", "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } @@ -3257,6 +3257,11 @@ "Amazon" ] }, + "PaginationToken":{ + "type":"string", + "max":65535, + "min":1 + }, "PipelineExecutionStartCondition":{ "type":"string", "enum":[ diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 259f97310a98..eac7e2cb84cf 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 5143e7786301..837e77cec490 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/paginators-1.json b/services/iot/src/main/resources/codegen-resources/paginators-1.json index ad854f2b1add..c4c12bf3f80c 100644 --- a/services/iot/src/main/resources/codegen-resources/paginators-1.json +++ b/services/iot/src/main/resources/codegen-resources/paginators-1.json @@ -221,6 +221,12 @@ "output_token": "nextToken", "result_key": "thingGroups" }, + "ListThingPrincipals": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "principals" + }, "ListThingRegistrationTaskReports": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/services/iot/src/main/resources/codegen-resources/service-2.json b/services/iot/src/main/resources/codegen-resources/service-2.json index c813b497f152..3cbd4f4d20db 100644 --- a/services/iot/src/main/resources/codegen-resources/service-2.json +++ b/services/iot/src/main/resources/codegen-resources/service-2.json @@ -989,6 +989,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"DeleteConflictException"}, {"shape":"ThrottlingException"}, + {"shape":"ConflictingResourceUpdateException"}, {"shape":"UnauthorizedException"} ], "documentation":"

Deletes a fleet provisioning template.

" @@ -1007,6 +1008,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"UnauthorizedException"}, + {"shape":"ConflictingResourceUpdateException"}, {"shape":"DeleteConflictException"} ], "documentation":"

Deletes a fleet provisioning template version.

" @@ -2639,7 +2641,8 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"InternalFailureException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

List the thing groups in your account.

" }, @@ -2654,7 +2657,8 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"InternalFailureException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

List the thing groups to which the specified thing belongs.

" }, @@ -2769,7 +2773,8 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"InternalFailureException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

Lists the things in the specified group.

" }, @@ -3705,7 +3710,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws:acm:[a-z]{2}-(gov-)?[a-z]{4,9}-\\d{1}:\\d{12}:certificate/?[a-zA-Z0-9/-]+" + "pattern":"arn:aws(-cn|-us-gov|-iso-b|-iso)?:acm:[a-z]{2}-(gov-|iso-|isob-)?[a-z]{4,9}-\\d{1}:\\d{12}:certificate/[a-zA-Z0-9/-]+" }, "Action":{ "type":"structure", @@ -4080,6 +4085,12 @@ "comment":{ "shape":"Comment", "documentation":"

An optional comment string describing why the job was associated with the targets.

" + }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "location":"querystring", + "locationName":"namespaceId" } } }, @@ -4661,7 +4672,10 @@ "type":"list", "member":{"shape":"AuthResult"} }, - "AuthorizerArn":{"type":"string"}, + "AuthorizerArn":{ + "type":"string", + "max":2048 + }, "AuthorizerConfig":{ "type":"structure", "members":{ @@ -4719,7 +4733,10 @@ }, "documentation":"

The authorizer description.

" }, - "AuthorizerFunctionArn":{"type":"string"}, + "AuthorizerFunctionArn":{ + "type":"string", + "max":2048 + }, "AuthorizerName":{ "type":"string", "max":128, @@ -4916,6 +4933,7 @@ "documentation":"

Specifies the amount of time each device has to finish its execution of the job. A timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the timer expires, it will be automatically set to TIMED_OUT.

" }, "AwsJobTimeoutInProgressTimeoutInMinutes":{"type":"long"}, + "BatchMode":{"type":"boolean"}, "Behavior":{ "type":"structure", "required":["name"], @@ -6034,6 +6052,10 @@ "tags":{ "shape":"TagList", "documentation":"

Metadata which can be used to manage the job.

" + }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.
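
Because the notification topic is a fixed pattern, the per-job topic for a namespaced job can be built with plain string formatting; the thing name, job ID, and namespace below are placeholders.

public final class JobNotifyTopic {
    // Format documented above: $aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/
    static String notifyTopic(String thingName, String jobId, String namespaceId) {
        return String.format("$aws/things/%s/jobs/%s/notify-namespace-%s/", thingName, jobId, namespaceId);
    }

    public static void main(String[] args) {
        // Prints $aws/things/my-thing/jobs/ota-job-17/notify-namespace-prod/
        System.out.println(notifyTopic("my-thing", "ota-job-17", "prod"));
    }
}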

" } } }, @@ -6556,7 +6578,7 @@ }, "additionalMetricsToRetain":{ "shape":"AdditionalMetricsToRetainList", - "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

Note: This API field is deprecated. Please use CreateSecurityProfileRequest$additionalMetricsToRetainV2 instead.

", + "documentation":"

Please use CreateSecurityProfileRequest$additionalMetricsToRetainV2 instead.

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

", "deprecated":true, "deprecatedMessage":"Use additionalMetricsToRetainV2." }, @@ -7068,6 +7090,12 @@ "documentation":"

(Optional) When true, you can delete a job execution which is \"IN_PROGRESS\". Otherwise, you can only delete a job execution which is in a terminal state (\"SUCCEEDED\", \"FAILED\", \"REJECTED\", \"REMOVED\" or \"CANCELED\") or an exception will occur. The default is false.

Deleting a job execution which is \"IN_PROGRESS\", will cause the device to be unable to access job information or update the job execution status. Use caution and ensure that the device is able to recover to a valid state.

", "location":"querystring", "locationName":"force" + }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "location":"querystring", + "locationName":"namespaceId" } } }, @@ -7086,6 +7114,12 @@ "documentation":"

(Optional) When true, you can delete a job which is \"IN_PROGRESS\". Otherwise, you can only delete a job which is in a terminal state (\"COMPLETED\" or \"CANCELED\") or an exception will occur. The default is false.

Deleting a job which is \"IN_PROGRESS\", will cause a device which is executing the job to be unable to access job information or update the job execution status. Use caution and ensure that each device executing a job which is deleted is able to recover to a valid state.

", "location":"querystring", "locationName":"force" + }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "location":"querystring", + "locationName":"namespaceId" } } }, @@ -7827,6 +7861,10 @@ "domainType":{ "shape":"DomainType", "documentation":"

The type of the domain.

" + }, + "lastStatusChangeDate":{ + "shape":"DateType", + "documentation":"

The date and time the domain configuration's status was last changed.

" } } }, @@ -8211,7 +8249,7 @@ }, "additionalMetricsToRetain":{ "shape":"AdditionalMetricsToRetainList", - "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

Note: This API field is deprecated. Please use DescribeSecurityProfileResponse$additionalMetricsToRetainV2 instead.

", + "documentation":"

Please use DescribeSecurityProfileResponse$additionalMetricsToRetainV2 instead.

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

", "deprecated":true, "deprecatedMessage":"Use additionalMetricsToRetainV2." }, @@ -8585,9 +8623,8 @@ }, "DetailsValue":{ "type":"string", - "max":1024, "min":1, - "pattern":"[^\\p{C}]*+" + "pattern":"[^\\p{C}]+" }, "DeviceCertificateUpdateAction":{ "type":"string", @@ -8957,11 +8994,11 @@ }, "incrementFactor":{ "shape":"IncrementFactor", - "documentation":"

The exponential factor to increase the rate of rollout for a job.

" + "documentation":"

The exponential factor to increase the rate of rollout for a job.

AWS IoT supports up to one digit after the decimal (for example, 1.5, but not 1.55).

" }, "rateIncreaseCriteria":{ "shape":"RateIncreaseCriteria", - "documentation":"

The criteria to initiate the increase in rate of rollout for a job.

AWS IoT supports up to one digit after the decimal (for example, 1.5, but not 1.55).

" + "documentation":"

The criteria to initiate the increase in rate of rollout for a job.

" } }, "documentation":"

Allows you to create an exponential rate of rollout for a job.

" @@ -9016,6 +9053,11 @@ "documentation":"

The location of the OTA update.

" }, "FileName":{"type":"string"}, + "FileType":{ + "type":"integer", + "max":255, + "min":0 + }, "FindingId":{ "type":"string", "max":128, @@ -9046,6 +9088,10 @@ "separator":{ "shape":"FirehoseSeparator", "documentation":"

A character separator that will be used to separate records written to the Firehose stream. Valid values are: '\\n' (newline), '\\t' (tab), '\\r\\n' (Windows newline), ',' (comma).

" + }, + "batchMode":{ + "shape":"BatchMode", + "documentation":"

Whether to deliver the Kinesis Data Firehose stream as a batch by using PutRecordBatch. The default value is false.

When batchMode is true and the rule's SQL statement evaluates to an Array, each Array element forms one record in the PutRecordBatch request. The resulting array can't have more than 500 records.

" } }, "documentation":"

Describes an action that writes data to an Amazon Kinesis Firehose stream.

" @@ -9753,6 +9799,10 @@ "shape":"ChannelName", "documentation":"

The name of the IoT Analytics channel to which message data will be sent.

" }, + "batchMode":{ + "shape":"BatchMode", + "documentation":"

Whether to process the action as a batch. The default value is false.

When batchMode is true and the rule SQL statement evaluates to an Array, each Array element is delivered as a separate message when passed by BatchPutMessage to the AWS IoT Analytics channel. The resulting array can't have more than 100 messages.
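
A small sketch of the new flag on the IoT Analytics action, using only the member names shown above; whether batching is appropriate depends on the rule's SQL producing an array.

import software.amazon.awssdk.services.iot.model.IotAnalyticsAction;

public class BatchedIotAnalyticsAction {
    public static IotAnalyticsAction batchedChannelAction(String channelName, String roleArn) {
        // With batchMode enabled, a rule whose SQL statement evaluates to an array is delivered
        // via BatchPutMessage, one message per array element (at most 100 per call).
        return IotAnalyticsAction.builder()
                .channelName(channelName)
                .batchMode(true)
                .roleArn(roleArn)
                .build();
    }
}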

" + }, "roleArn":{ "shape":"AwsArn", "documentation":"

The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).

" @@ -9773,7 +9823,11 @@ }, "messageId":{ "shape":"MessageId", - "documentation":"

[Optional] Use this to ensure that only one input (message) with a given messageId will be processed by an AWS IoT Events detector.

" + "documentation":"

The ID of the message. The default messageId is a new UUID value.

When batchMode is true, you can't specify a messageId; a new UUID value will be assigned.

Assign a value to this property to ensure that only one input (message) with a given messageId will be processed by an AWS IoT Events detector.

" + }, + "batchMode":{ + "shape":"BatchMode", + "documentation":"

Whether to process the event actions as a batch. The default value is false.

When batchMode is true, you can't specify a messageId.

When batchMode is true and the rule SQL statement evaluates to an Array, each Array element is treated as a separate message when it's sent to AWS IoT Events by calling BatchPutMessage. The resulting array can't have more than 10 messages.

" }, "roleArn":{ "shape":"AwsArn", @@ -9874,6 +9928,10 @@ "timeoutConfig":{ "shape":"TimeoutConfig", "documentation":"

Specifies the amount of time each device has to finish its execution of the job. A timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the timer expires, it will be automatically set to TIMED_OUT.

" + }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

" } }, "documentation":"

The Job object contains details about a job.

" @@ -10620,7 +10678,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -10647,7 +10705,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -10927,6 +10985,12 @@ "location":"querystring", "locationName":"status" }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "location":"querystring", + "locationName":"namespaceId" + }, "maxResults":{ "shape":"LaserMaxResults", "documentation":"

The maximum number of results to be returned per request.

", @@ -10992,6 +11056,12 @@ "documentation":"

A filter that limits the returned jobs to those for the specified group.

", "location":"querystring", "locationName":"thingGroupId" + }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "location":"querystring", + "locationName":"namespaceId" } } }, @@ -11275,7 +11345,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11303,7 +11373,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } }, "documentation":"

The output from the ListPrincipalThings operation.

" @@ -11569,7 +11639,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" } @@ -11584,7 +11654,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -11674,7 +11744,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11695,7 +11765,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -11704,7 +11774,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11743,7 +11813,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results. Will not be returned if operation has returned all results.

" + "documentation":"

The token to use to get the next set of results. Will not be returned if operation has returned all results.

" } } }, @@ -11751,6 +11821,18 @@ "type":"structure", "required":["thingName"], "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"RegistryMaxResults", + "documentation":"

The maximum number of results to return in this operation.

", + "location":"querystring", + "locationName":"maxResults" + }, "thingName":{ "shape":"ThingName", "documentation":"

The name of the thing.

", @@ -11766,6 +11848,10 @@ "principals":{ "shape":"Principals", "documentation":"

The principals associated with the thing.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } }, "documentation":"

The output from the ListThingPrincipals operation.
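
Because this change also registers ListThingPrincipals in paginators-1.json, the generated Java v2 client should expose a paginator for the operation. A sketch, assuming the usual listThingPrincipalsPaginator naming produced by codegen:

import software.amazon.awssdk.services.iot.IotClient;
import software.amazon.awssdk.services.iot.model.ListThingPrincipalsRequest;
import software.amazon.awssdk.services.iot.model.ListThingPrincipalsResponse;

public class ListAllThingPrincipals {
    public static void printPrincipals(IotClient iot, String thingName) {
        ListThingPrincipalsRequest request = ListThingPrincipalsRequest.builder()
                .thingName(thingName)
                .maxResults(25)   // optional paging member added in this change
                .build();
        // The paginator follows nextToken across pages automatically.
        for (ListThingPrincipalsResponse page : iot.listThingPrincipalsPaginator(request)) {
            page.principals().forEach(System.out::println);
        }
    }
}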

" @@ -11791,7 +11877,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11816,7 +11902,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -11825,7 +11911,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11852,7 +11938,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -11861,7 +11947,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11906,7 +11992,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11927,7 +12013,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results. Will not be returned if operation has returned all results.

" + "documentation":"

The token to use to get the next set of results. Will not be returned if operation has returned all results.

" } } }, @@ -11949,7 +12035,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -11970,7 +12056,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -11979,7 +12065,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -12019,7 +12105,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results. Will not be returned if operation has returned all results.

" + "documentation":"

The token to use to get the next set of results. Will not be returned if the operation has returned all results.

" } }, "documentation":"

The output from the ListThings operation.

" @@ -12035,7 +12121,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

", + "documentation":"

To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" } @@ -12050,7 +12136,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token to retrieve the next set of results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -12071,7 +12157,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token used to retrieve the next value.

", + "documentation":"

To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -12093,7 +12179,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token used to retrieve the next value.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } }, "documentation":"

The output from the ListTopicRules operation.

" @@ -12109,7 +12195,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

", + "documentation":"

To retrieve the next set of results, use the nextToken value from a previous response; otherwise null to receive the first set of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -12130,7 +12216,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" } } }, @@ -12273,6 +12359,7 @@ }, "Marker":{ "type":"string", + "max":1024, "pattern":"[A-Za-z0-9+/]+={0,2}" }, "MaxJobExecutionsPerMin":{ @@ -12499,6 +12586,12 @@ "max":65535, "min":1 }, + "NamespaceId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, "NextToken":{"type":"string"}, "NonCompliantChecksCount":{"type":"integer"}, "NonCompliantResource":{ @@ -12550,6 +12643,10 @@ "shape":"FileName", "documentation":"

The name of the file.

" }, + "fileType":{ + "shape":"FileType", + "documentation":"

An integer value you can include in the job document to allow your devices to identify the type of file received from the cloud.

" + }, "fileVersion":{ "shape":"OTAUpdateFileVersion", "documentation":"

The file version.

" @@ -13455,7 +13552,10 @@ "min":1, "pattern":"[\\w.:-]+" }, - "Resource":{"type":"string"}, + "Resource":{ + "type":"string", + "max":2048 + }, "ResourceAlreadyExistsException":{ "type":"structure", "members":{ @@ -13646,7 +13746,7 @@ }, "key":{ "shape":"Key", - "documentation":"

The object key.

" + "documentation":"

The object key. For more information, see Actions, resources, and condition keys for Amazon S3.

" }, "cannedAcl":{ "shape":"CannedAccessControlList", @@ -14063,7 +14163,7 @@ "documentation":"

The ARN of the signing role.

" } }, - "documentation":"

Use Sig V4 authorization.

" + "documentation":"

For more information, see Signature Version 4 signing process.

" }, "Signature":{"type":"blob"}, "SignatureAlgorithm":{"type":"string"}, @@ -15384,7 +15484,7 @@ "members":{ "sql":{ "shape":"SQL", - "documentation":"

The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide.

" + "documentation":"

The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference in the AWS IoT Developer Guide.

" }, "description":{ "shape":"Description", @@ -15927,6 +16027,12 @@ "timeoutConfig":{ "shape":"TimeoutConfig", "documentation":"

Specifies the amount of time each device has to finish its execution of the job. The timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the time expires, it will be automatically set to TIMED_OUT.

" + }, + "namespaceId":{ + "shape":"NamespaceId", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, AWS IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "location":"querystring", + "locationName":"namespaceId" } } }, @@ -16098,7 +16204,7 @@ }, "additionalMetricsToRetain":{ "shape":"AdditionalMetricsToRetainList", - "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

Note: This API field is deprecated. Please use UpdateSecurityProfileRequest$additionalMetricsToRetainV2 instead.

", + "documentation":"

Please use UpdateSecurityProfileRequest$additionalMetricsToRetainV2 instead.

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the profile's behaviors, but it is also retained for any metric specified here.

", "deprecated":true, "deprecatedMessage":"Use additionalMetricsToRetainV2." }, @@ -16151,7 +16257,7 @@ }, "additionalMetricsToRetain":{ "shape":"AdditionalMetricsToRetainList", - "documentation":"

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the security profile's behaviors, but it is also retained for any metric specified here.

Note: This API field is deprecated. Please use UpdateSecurityProfileResponse$additionalMetricsToRetainV2 instead.

", + "documentation":"

Please use UpdateSecurityProfileResponse$additionalMetricsToRetainV2 instead.

A list of metrics whose data is retained (stored). By default, data is retained for any metric used in the security profile's behaviors, but it is also retained for any metric specified here.

", "deprecated":true, "deprecatedMessage":"Use additionalMetricsToRetainV2." }, diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index c262efe3f075..3cde3cc2d1f8 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index e434cbcca33b..19de1b60a18b 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index 0a7dcb2ed13b..6b4eb16fbb33 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotanalytics/src/main/resources/codegen-resources/service-2.json b/services/iotanalytics/src/main/resources/codegen-resources/service-2.json index 3b4e49144c73..508b6a032c54 100644 --- a/services/iotanalytics/src/main/resources/codegen-resources/service-2.json +++ b/services/iotanalytics/src/main/resources/codegen-resources/service-2.json @@ -82,7 +82,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a data set. A data set stores data retrieved from a data store by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application). This operation creates the skeleton of a data set. The data set can be populated manually by calling \"CreateDatasetContent\" or automatically according to a \"trigger\" you specify.

" + "documentation":"

Creates a dataset. A dataset stores data retrieved from a data store by applying a queryAction (a SQL query) or a containerAction (executing a containerized application). This operation creates the skeleton of a dataset. The dataset can be populated manually by calling CreateDatasetContent or automatically according to a trigger you specify.

" }, "CreateDatasetContent":{ "name":"CreateDatasetContent", @@ -99,7 +99,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates the content of a data set by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application).

" + "documentation":"

Creates the content of a data set by applying a queryAction (a SQL query) or a containerAction (executing a containerized application).

" }, "CreateDatastore":{ "name":"CreateDatastore", @@ -171,7 +171,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes the specified data set.

You do not have to delete the content of the data set before you perform this operation.

" + "documentation":"

Deletes the specified dataset.

You do not have to delete the content of the dataset before you perform this operation.

" }, "DeleteDatasetContent":{ "name":"DeleteDatasetContent", @@ -188,7 +188,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes the content of the specified data set.

" + "documentation":"

Deletes the content of the specified dataset.

" }, "DeleteDatastore":{ "name":"DeleteDatastore", @@ -256,7 +256,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves information about a data set.

" + "documentation":"

Retrieves information about a dataset.

" }, "DescribeDatastore":{ "name":"DescribeDatastore", @@ -324,7 +324,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the contents of a data set as pre-signed URIs.

" + "documentation":"

Retrieves the contents of a data set as presigned URIs.

" }, "ListChannels":{ "name":"ListChannels", @@ -423,7 +423,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the tags (metadata) which you have assigned to the resource.

" + "documentation":"

Lists the tags (metadata) that you have assigned to the resource.

" }, "PutLoggingOptions":{ "name":"PutLoggingOptions", @@ -438,7 +438,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Sets or updates the AWS IoT Analytics logging options.

Note that if you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy) it takes up to 5 minutes for that change to take effect.

" + "documentation":"

Sets or updates the AWS IoT Analytics logging options.

If you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy), it takes up to five minutes for that change to take effect.

" }, "RunPipelineActivity":{ "name":"RunPipelineActivity", @@ -508,7 +508,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds to or modifies the tags of the given resource. Tags are metadata which can be used to manage a resource.

" + "documentation":"

Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.

" }, "UntagResource":{ "name":"UntagResource", @@ -615,11 +615,11 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'addAttributes' activity.

" + "documentation":"

The name of the addAttributes activity.

" }, "attributes":{ "shape":"AttributeNameMapping", - "documentation":"

A list of 1-50 \"AttributeNameMapping\" objects that map an existing attribute to a new attribute.

The existing attributes remain in the message, so if you want to remove the originals, use \"RemoveAttributeActivity\".

" + "documentation":"

A list of 1-50 AttributeNameMapping objects that map an existing attribute to a new attribute.

The existing attributes remain in the message, so if you want to remove the originals, use RemoveAttributeActivity.

" }, "next":{ "shape":"ActivityName", @@ -655,7 +655,7 @@ "members":{ "messageId":{ "shape":"MessageId", - "documentation":"

The ID of the message that caused the error. (See the value corresponding to the \"messageId\" key in the message object.)

" + "documentation":"

The ID of the message that caused the error. See the value corresponding to the messageId key in the message object.

" }, "errorCode":{ "shape":"ErrorCode", @@ -681,7 +681,7 @@ }, "messages":{ "shape":"Messages", - "documentation":"

The list of messages to be sent. Each message has format: '{ \"messageId\": \"string\", \"payload\": \"string\"}'.

Note that the field names of message payloads (data) that you send to AWS IoT Analytics:

  • Must contain only alphanumeric characters and undescores (_); no other special characters are allowed.

  • Must begin with an alphabetic character or single underscore (_).

  • Cannot contain hyphens (-).

  • In regular expression terms: \"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$\".

  • Cannot be greater than 255 characters.

  • Are case-insensitive. (Fields named \"foo\" and \"FOO\" in the same payload are considered duplicates.)

For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.

" + "documentation":"

The list of messages to be sent. Each message has the format: { \"messageId\": \"string\", \"payload\": \"string\"}.

The field names of message payloads (data) that you send to AWS IoT Analytics:

  • Must contain only alphanumeric characters and underscores (_). No other special characters are allowed.

  • Must begin with an alphabetic character or single underscore (_).

  • Cannot contain hyphens (-).

  • In regular expression terms: \"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$\".

  • Cannot be more than 255 characters.

  • Are case insensitive. (Fields named foo and FOO in the same payload are considered duplicates.)

For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.

" } } }, @@ -721,7 +721,7 @@ }, "reprocessingId":{ "shape":"ReprocessingId", - "documentation":"

The ID of the reprocessing task (returned by \"StartPipelineReprocessing\").

", + "documentation":"

The ID of the reprocessing task (returned by StartPipelineReprocessing).

", "location":"uri", "locationName":"reprocessingId" } @@ -741,7 +741,7 @@ }, "storage":{ "shape":"ChannelStorage", - "documentation":"

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

" + "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

" }, "arn":{ "shape":"ChannelArn", @@ -762,6 +762,10 @@ "lastUpdateTime":{ "shape":"Timestamp", "documentation":"

When the channel was last updated.

" + }, + "lastMessageArrivalTime":{ + "shape":"Timestamp", + "documentation":"

The last time when a new message arrived in the channel.

AWS IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" } }, "documentation":"

A collection of data from an MQTT topic. Channels archive the raw, unprocessed messages before publishing the data to a pipeline.

" @@ -775,7 +779,7 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'channel' activity.

" + "documentation":"

The name of the channel activity.

" }, "channelName":{ "shape":"ChannelName", @@ -818,21 +822,21 @@ "members":{ "serviceManagedS3":{ "shape":"ServiceManagedChannelS3Storage", - "documentation":"

Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

" + "documentation":"

Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" }, "customerManagedS3":{ "shape":"CustomerManagedChannelS3Storage", - "documentation":"

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

" + "documentation":"

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" } }, - "documentation":"

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

" + "documentation":"

Where channel data is stored. You may choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. This cannot be changed after creation of the channel.

" }, "ChannelStorageSummary":{ "type":"structure", "members":{ "serviceManagedS3":{ "shape":"ServiceManagedChannelS3StorageSummary", - "documentation":"

Used to store channel data in an S3 bucket managed by the AWS IoT Analytics service.

" + "documentation":"

Used to store channel data in an S3 bucket managed by AWS IoT Analytics.

" }, "customerManagedS3":{ "shape":"CustomerManagedChannelS3StorageSummary", @@ -867,6 +871,10 @@ "lastUpdateTime":{ "shape":"Timestamp", "documentation":"

The last time the channel was updated.

" + }, + "lastMessageArrivalTime":{ + "shape":"Timestamp", + "documentation":"

The last time when a new message arrived in the channel.

AWS IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" } }, "documentation":"

A summary of information about a channel.

" @@ -888,22 +896,22 @@ "members":{ "image":{ "shape":"Image", - "documentation":"

The ARN of the Docker container stored in your account. The Docker container contains an application and needed support libraries and is used to generate data set contents.

" + "documentation":"

The ARN of the Docker container stored in your account. The Docker container contains an application and required support libraries and is used to generate dataset contents.

" }, "executionRoleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role which gives permission to the system to access needed resources in order to run the \"containerAction\". This includes, at minimum, permission to retrieve the data set contents which are the input to the containerized application.

" + "documentation":"

The ARN of the role that gives permission to the system to access required resources to run the containerAction. This includes, at minimum, permission to retrieve the dataset contents that are the input to the containerized application.

" }, "resourceConfiguration":{ "shape":"ResourceConfiguration", - "documentation":"

Configuration of the resource which executes the \"containerAction\".

" + "documentation":"

Configuration of the resource that executes the containerAction.

" }, "variables":{ "shape":"Variables", - "documentation":"

The values of variables used within the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of \"stringValue\", \"datasetContentVersionValue\", or \"outputFileUriValue\".

" + "documentation":"

The values of variables used in the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of stringValue, datasetContentVersionValue, or outputFileUriValue.

" } }, - "documentation":"

Information needed to run the \"containerAction\" to produce data set contents.

" + "documentation":"

Information required to run the containerAction to produce dataset contents.

" }, "CreateChannelRequest":{ "type":"structure", @@ -915,11 +923,11 @@ }, "channelStorage":{ "shape":"ChannelStorage", - "documentation":"

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

" + "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

How long, in days, message data is kept for the channel. When \"customerManagedS3\" storage is selected, this parameter is ignored.

" + "documentation":"

How long, in days, message data is kept for the channel. When customerManagedS3 storage is selected, this parameter is ignored.

" }, "tags":{ "shape":"TagList", @@ -950,9 +958,13 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set.

", + "documentation":"

The name of the dataset.

", "location":"uri", "locationName":"datasetName" + }, + "versionId":{ + "shape":"DatasetContentVersion", + "documentation":"

The version ID of the dataset content. To specify versionId for a dataset content, the dataset must use a DeltaTime filter.

" } } }, @@ -961,7 +973,7 @@ "members":{ "versionId":{ "shape":"DatasetContentVersion", - "documentation":"

The version ID of the data set contents which are being created.

" + "documentation":"

The version ID of the dataset contents that are being created.

" } } }, @@ -982,23 +994,27 @@ }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" + "documentation":"

A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" }, "contentDeliveryRules":{ "shape":"DatasetContentDeliveryRules", - "documentation":"

When data set contents are created they are delivered to destinations specified here.

" + "documentation":"

When dataset contents are created, they are delivered to destinations specified here.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

[Optional] How long, in days, versions of data set contents are kept for the data set. If not specified or set to null, versions of data set contents are retained for at most 90 days. The number of versions of data set contents retained is determined by the versioningConfiguration parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

" + "documentation":"

Optional. How long, in days, versions of dataset contents are kept for the dataset. If not specified or set to null, versions of dataset contents are retained for at most 90 days. The number of versions of dataset contents retained is determined by the versioningConfiguration parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" }, "versioningConfiguration":{ "shape":"VersioningConfiguration", - "documentation":"

[Optional] How many versions of data set contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the \"retentionPeriod\" parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

" + "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" }, "tags":{ "shape":"TagList", "documentation":"

Metadata which can be used to manage the data set.

" + }, + "lateDataRules":{ + "shape":"LateDataRules", + "documentation":"

A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.

" } } }, @@ -1007,15 +1023,15 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set.

" + "documentation":"

The name of the dataset.

" }, "datasetArn":{ "shape":"DatasetArn", - "documentation":"

The ARN of the data set.

" + "documentation":"

The ARN of the dataset.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

How long, in days, data set contents are kept for the data set.

" + "documentation":"

How long, in days, dataset contents are kept for the dataset.

" } } }, @@ -1029,11 +1045,11 @@ }, "datastoreStorage":{ "shape":"DatastoreStorage", - "documentation":"

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

" + "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

How long, in days, message data is kept for the data store. When \"customerManagedS3\" storage is selected, this parameter is ignored.

" + "documentation":"

How long, in days, message data is kept for the data store. When customerManagedS3 storage is selected, this parameter is ignored.

" }, "tags":{ "shape":"TagList", @@ -1071,7 +1087,7 @@ }, "pipelineActivities":{ "shape":"PipelineActivities", - "documentation":"

A list of \"PipelineActivity\" objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity, for example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" + "documentation":"

A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" }, "tags":{ "shape":"TagList", @@ -1101,33 +1117,33 @@ "members":{ "bucket":{ "shape":"BucketName", - "documentation":"

The name of the Amazon S3 bucket in which channel data is stored.

" + "documentation":"

The name of the S3 bucket in which channel data is stored.

" }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

[Optional] The prefix used to create the keys of the channel data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

" + "documentation":"

Optional. The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" } }, - "documentation":"

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

" + "documentation":"

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" }, "CustomerManagedChannelS3StorageSummary":{ "type":"structure", "members":{ "bucket":{ "shape":"BucketName", - "documentation":"

The name of the Amazon S3 bucket in which channel data is stored.

" + "documentation":"

The name of the S3 bucket in which channel data is stored.

" }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

[Optional] The prefix used to create the keys of the channel data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

" + "documentation":"

Optional. The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" } }, "documentation":"

Used to store channel data in an S3 bucket that you manage.

" @@ -1141,33 +1157,33 @@ "members":{ "bucket":{ "shape":"BucketName", - "documentation":"

The name of the Amazon S3 bucket in which data store data is stored.

" + "documentation":"

The name of the S3 bucket in which data store data is stored.

" }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

[Optional] The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

" + "documentation":"

Optional. The prefix used to create the keys of the data store data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" } }, - "documentation":"

Use this to store data store data in an S3 bucket that you manage. When customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" + "documentation":"

Use this to store data store data in an S3 bucket that you manage. When customer-managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

" }, "CustomerManagedDatastoreS3StorageSummary":{ "type":"structure", "members":{ "bucket":{ "shape":"BucketName", - "documentation":"

The name of the Amazon S3 bucket in which data store data is stored.

" + "documentation":"

The name of the S3 bucket in which data store data is stored.

" }, "keyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

[Optional] The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

" + "documentation":"

Optional. The prefix used to create the keys of the data store data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" + "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

" } }, "documentation":"

Used to store data store data in an S3 bucket that you manage.

" @@ -1185,15 +1201,15 @@ }, "actions":{ "shape":"DatasetActions", - "documentation":"

The \"DatasetAction\" objects that automatically create the data set contents.

" + "documentation":"

The DatasetAction objects that automatically create the data set contents.

" }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

The \"DatasetTrigger\" objects that specify when the data set is automatically updated.

" + "documentation":"

The DatasetTrigger objects that specify when the data set is automatically updated.

" }, "contentDeliveryRules":{ "shape":"DatasetContentDeliveryRules", - "documentation":"

When data set contents are created they are delivered to destinations specified here.

" + "documentation":"

When dataset contents are created, they are delivered to destinations specified here.

" }, "status":{ "shape":"DatasetStatus", @@ -1209,11 +1225,15 @@ }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

[Optional] How long, in days, message data is kept for the data set.

" + "documentation":"

Optional. How long, in days, message data is kept for the data set.

" }, "versioningConfiguration":{ "shape":"VersioningConfiguration", - "documentation":"

[Optional] How many versions of data set contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the \"retentionPeriod\" parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

" + "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" + }, + "lateDataRules":{ + "shape":"LateDataRules", + "documentation":"

A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.

" } }, "documentation":"

Information about a data set.

" @@ -1227,14 +1247,14 @@ }, "queryAction":{ "shape":"SqlQueryDatasetAction", - "documentation":"

An \"SqlQueryDatasetAction\" object that uses an SQL query to automatically create data set contents.

" + "documentation":"

An SqlQueryDatasetAction object that uses an SQL query to automatically create data set contents.

" }, "containerAction":{ "shape":"ContainerDatasetAction", - "documentation":"

Information which allows the system to run a containerized application in order to create the data set contents. The application must be in a Docker container along with any needed support libraries.

" + "documentation":"

Information that allows the system to run a containerized application to create the dataset contents. The application must be in a Docker container along with any required support libraries.

" } }, - "documentation":"

A \"DatasetAction\" object that specifies how data set contents are automatically created.

" + "documentation":"

A DatasetAction object that specifies how data set contents are automatically created.

" }, "DatasetActionName":{ "type":"string", @@ -1253,14 +1273,14 @@ "members":{ "actionName":{ "shape":"DatasetActionName", - "documentation":"

The name of the action which automatically creates the data set's contents.

" + "documentation":"

The name of the action that automatically creates the dataset's contents.

" }, "actionType":{ "shape":"DatasetActionType", - "documentation":"

The type of action by which the data set's contents are automatically created.

" + "documentation":"

The type of action by which the dataset's contents are automatically created.

" } }, - "documentation":"

Information about the action which automatically creates the data set's contents.

" + "documentation":"

Information about the action that automatically creates the dataset's contents.

" }, "DatasetActionType":{ "type":"string", @@ -1281,14 +1301,14 @@ "members":{ "iotEventsDestinationConfiguration":{ "shape":"IotEventsDestinationConfiguration", - "documentation":"

Configuration information for delivery of data set contents to AWS IoT Events.

" + "documentation":"

Configuration information for delivery of dataset contents to AWS IoT Events.

" }, "s3DestinationConfiguration":{ "shape":"S3DestinationConfiguration", - "documentation":"

Configuration information for delivery of data set contents to Amazon S3.

" + "documentation":"

Configuration information for delivery of dataset contents to Amazon S3.

" } }, - "documentation":"

The destination to which data set contents are delivered.

" + "documentation":"

The destination to which dataset contents are delivered.

" }, "DatasetContentDeliveryRule":{ "type":"structure", @@ -1296,14 +1316,14 @@ "members":{ "entryName":{ "shape":"EntryName", - "documentation":"

The name of the data set content delivery rules entry.

" + "documentation":"

The name of the dataset content delivery rules entry.

" }, "destination":{ "shape":"DatasetContentDeliveryDestination", - "documentation":"

The destination to which data set contents are delivered.

" + "documentation":"

The destination to which dataset contents are delivered.

" } }, - "documentation":"

When data set contents are created they are delivered to destination specified here.

" + "documentation":"

When dataset contents are created, they are delivered to the destination specified here.

" }, "DatasetContentDeliveryRules":{ "type":"list", @@ -1324,7 +1344,7 @@ "members":{ "state":{ "shape":"DatasetContentState", - "documentation":"

The state of the data set contents. Can be one of \"READY\", \"CREATING\", \"SUCCEEDED\" or \"FAILED\".

" + "documentation":"

The state of the data set contents. Can be one of READY, CREATING, SUCCEEDED, or FAILED.

" }, "reason":{ "shape":"Reason", @@ -1342,7 +1362,7 @@ "members":{ "version":{ "shape":"DatasetContentVersion", - "documentation":"

The version of the data set contents.

" + "documentation":"

The version of the dataset contents.

" }, "status":{ "shape":"DatasetContentStatus", @@ -1350,18 +1370,18 @@ }, "creationTime":{ "shape":"Timestamp", - "documentation":"

The actual time the creation of the data set contents was started.

" + "documentation":"

The actual time the creation of the dataset contents was started.

" }, "scheduleTime":{ "shape":"Timestamp", - "documentation":"

The time the creation of the data set contents was scheduled to start.

" + "documentation":"

The time the creation of the dataset contents was scheduled to start.

" }, "completionTime":{ "shape":"Timestamp", "documentation":"

The time the dataset content status was updated to SUCCEEDED or FAILED.

" } }, - "documentation":"

Summary information about data set contents.

" + "documentation":"

Summary information about dataset contents.

" }, "DatasetContentVersion":{ "type":"string", @@ -1374,10 +1394,10 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose latest contents are used as input to the notebook or application.

" + "documentation":"

The name of the dataset whose latest contents are used as input to the notebook or application.

" } }, - "documentation":"

The data set whose latest contents are used as input to the notebook or application.

" + "documentation":"

The dataset whose latest contents are used as input to the notebook or application.

" }, "DatasetEntries":{ "type":"list", @@ -1392,7 +1412,7 @@ }, "dataURI":{ "shape":"PresignedURI", - "documentation":"

The pre-signed URI of the data set item.

" + "documentation":"

The presigned URI of the data set item.

" } }, "documentation":"

The reference to a data set entry.

" @@ -1436,11 +1456,11 @@ }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

A list of triggers. A trigger causes data set content to be populated at a specified time interval or when another data set is populated. The list of triggers can be empty or contain up to five DataSetTrigger objects

" + "documentation":"

A list of triggers. A trigger causes data set content to be populated at a specified time interval or when another data set is populated. The list of triggers can be empty or contain up to five DataSetTrigger objects.

" }, "actions":{ "shape":"DatasetActionSummaries", - "documentation":"

A list of \"DataActionSummary\" objects.

" + "documentation":"

A list of DataActionSummary objects.

" } }, "documentation":"

A summary of information about a data set.

" @@ -1450,14 +1470,14 @@ "members":{ "schedule":{ "shape":"Schedule", - "documentation":"

The \"Schedule\" when the trigger is initiated.

" + "documentation":"

The Schedule when the trigger is initiated.

" }, "dataset":{ "shape":"TriggeringDataset", "documentation":"

The data set whose content creation triggers the creation of this data set's contents.

" } }, - "documentation":"

The \"DatasetTrigger\" that specifies when the data set is automatically updated.

" + "documentation":"

The DatasetTrigger that specifies when the data set is automatically updated.

" }, "DatasetTriggers":{ "type":"list", @@ -1474,7 +1494,7 @@ }, "storage":{ "shape":"DatastoreStorage", - "documentation":"

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

" + "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" }, "arn":{ "shape":"DatastoreArn", @@ -1486,7 +1506,7 @@ }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

How long, in days, message data is kept for the data store. When \"customerManagedS3\" storage is selected, this parameter is ignored.

" + "documentation":"

How long, in days, message data is kept for the data store. When customerManagedS3 storage is selected, this parameter is ignored.

" }, "creationTime":{ "shape":"Timestamp", @@ -1495,6 +1515,10 @@ "lastUpdateTime":{ "shape":"Timestamp", "documentation":"

The last time the data store was updated.

" + }, + "lastMessageArrivalTime":{ + "shape":"Timestamp", + "documentation":"

The last time when a new message arrived in the data store.

AWS IoT Analytics updates this value at most once per minute for one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" } }, "documentation":"

Information about a data store.

" @@ -1508,14 +1532,14 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'datastore' activity.

" + "documentation":"

The name of the datastore activity.

" }, "datastoreName":{ "shape":"DatastoreName", "documentation":"

The name of the data store where processed messages are stored.

" } }, - "documentation":"

The 'datastore' activity that specifies where to store the processed data.

" + "documentation":"

The datastore activity that specifies where to store the processed data.

" }, "DatastoreArn":{"type":"string"}, "DatastoreName":{ @@ -1547,21 +1571,21 @@ "members":{ "serviceManagedS3":{ "shape":"ServiceManagedDatastoreS3Storage", - "documentation":"

Use this to store data store data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" + "documentation":"

Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

" }, "customerManagedS3":{ "shape":"CustomerManagedDatastoreS3Storage", - "documentation":"

Use this to store data store data in an S3 bucket that you manage. When customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" + "documentation":"

Use this to store data store data in an S3 bucket that you manage. When customer managed storage is selected, the retentionPeriod parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" } }, - "documentation":"

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

" + "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" }, "DatastoreStorageSummary":{ "type":"structure", "members":{ "serviceManagedS3":{ "shape":"ServiceManagedDatastoreS3StorageSummary", - "documentation":"

Used to store data store data in an S3 bucket managed by the AWS IoT Analytics service.

" + "documentation":"

Used to store data store data in an S3 bucket managed by AWS IoT Analytics.

" }, "customerManagedS3":{ "shape":"CustomerManagedDatastoreS3StorageSummary", @@ -1596,6 +1620,10 @@ "lastUpdateTime":{ "shape":"Timestamp", "documentation":"

The last time the data store was updated.

" + }, + "lastMessageArrivalTime":{ + "shape":"Timestamp", + "documentation":"

The last time when a new message arrived in the data store.

AWS IoT Analytics updates this value at most once per minute for one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

" } }, "documentation":"

A summary of information about a data store.

" @@ -1618,13 +1646,13 @@ "members":{ "datasetName":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose content is deleted.

", + "documentation":"

The name of the dataset whose content is deleted.

", "location":"uri", "locationName":"datasetName" }, "versionId":{ "shape":"DatasetContentVersion", - "documentation":"

The version of the data set whose content is deleted. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to delete the latest or latest successfully completed data set. If not specified, \"$LATEST_SUCCEEDED\" is the default.

", + "documentation":"

The version of the dataset whose content is deleted. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to delete the latest or latest successfully completed data set. If not specified, \"$LATEST_SUCCEEDED\" is the default.

", "location":"querystring", "locationName":"versionId" } @@ -1675,15 +1703,26 @@ "members":{ "offsetSeconds":{ "shape":"OffsetSeconds", - "documentation":"

The number of seconds of estimated \"in flight\" lag time of message data. When you create data set contents using message data from a specified time frame, some message data may still be \"in flight\" when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the \"in flight\" time of your message data, so that data not processed from a previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.

" + "documentation":"

The number of seconds of estimated in-flight lag time of message data. When you create dataset contents using message data from a specified timeframe, some message data might still be in flight when processing begins, and so does not arrive in time to be processed. Use this field to make allowances for the in-flight time of your message data, so that data not processed from a previous timeframe is included with the next timeframe. Otherwise, missed message data would be excluded from processing during the next timeframe too, because its timestamp places it within the previous timeframe.

" }, "timeExpression":{ "shape":"TimeExpression", - "documentation":"

An expression by which the time of the message data may be determined. This may be the name of a timestamp field, or a SQL expression which is used to derive the time the message data was generated.

" + "documentation":"

An expression by which the time of the message data might be determined. This can be the name of a timestamp field or a SQL expression that is used to derive the time the message data was generated.

" } }, "documentation":"

Used to limit data to that which has arrived since the last execution of the action.

" }, + "DeltaTimeSessionWindowConfiguration":{ + "type":"structure", + "required":["timeoutInMinutes"], + "members":{ + "timeoutInMinutes":{ + "shape":"SessionTimeoutInMinutes", + "documentation":"

A time interval. You can use timeoutInMinutes so that AWS IoT Analytics can batch up late data notifications that have been generated since the last execution. AWS IoT Analytics sends one batch of notifications to Amazon CloudWatch Events at one time.

For more information about how to write a timestamp expression, see Date and Time Functions and Operators, in the Presto 0.172 Documentation.

" + } + }, + "documentation":"

A structure that contains the configuration information of a delta time session window.

DeltaTime specifies a time interval. You can use DeltaTime to create dataset contents with data that has arrived in the data store since the last execution. For an example of DeltaTime, see Creating a SQL dataset with a delta window (CLI) in the AWS IoT Analytics User Guide.

" + }, "DescribeChannelRequest":{ "type":"structure", "required":["channelName"], @@ -1711,7 +1750,7 @@ }, "statistics":{ "shape":"ChannelStatistics", - "documentation":"

Statistics about the channel. Included if the 'includeStatistics' parameter is set to true in the request.

" + "documentation":"

Statistics about the channel. Included if the includeStatistics parameter is set to true in the request.

" } } }, @@ -1763,7 +1802,7 @@ }, "statistics":{ "shape":"DatastoreStatistics", - "documentation":"

Additional statistical information about the data store. Included if the 'includeStatistics' parameter is set to true in the request.

" + "documentation":"

Additional statistical information about the data store. Included if the includeStatistics parameter is set to true in the request.

" } } }, @@ -1798,7 +1837,7 @@ "members":{ "pipeline":{ "shape":"Pipeline", - "documentation":"

A \"Pipeline\" object that contains information about the pipeline.

" + "documentation":"

A Pipeline object that contains information about the pipeline.

" } } }, @@ -1813,7 +1852,7 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'deviceRegistryEnrich' activity.

" + "documentation":"

The name of the deviceRegistryEnrich activity.

" }, "attribute":{ "shape":"AttributeName", @@ -1845,7 +1884,7 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'deviceShadowEnrich' activity.

" + "documentation":"

The name of the deviceShadowEnrich activity.

" }, "attribute":{ "shape":"AttributeName", @@ -1864,7 +1903,7 @@ "documentation":"

The next activity in the pipeline.

" } }, - "documentation":"

An activity that adds information from the AWS IoT Device Shadows service to a message.

" + "documentation":"

An activity that adds information from the AWS IoT Device Shadow service to a message.

" }, "DoubleValue":{"type":"double"}, "EndTime":{"type":"timestamp"}, @@ -1876,7 +1915,7 @@ "members":{ "estimatedSizeInBytes":{ "shape":"SizeInBytes", - "documentation":"

The estimated size of the resource in bytes.

" + "documentation":"

The estimated size of the resource, in bytes.

" }, "estimatedOn":{ "shape":"Timestamp", @@ -1894,11 +1933,11 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'filter' activity.

" + "documentation":"

The name of the filter activity.

" }, "filter":{ "shape":"FilterExpression", - "documentation":"

An expression that looks like a SQL WHERE clause that must return a Boolean value.

" + "documentation":"

An expression that looks like a SQL WHERE clause that must return a Boolean value. Messages that satisfy the condition are passed to the next activity.

" }, "next":{ "shape":"ActivityName", @@ -1935,7 +1974,7 @@ "members":{ "entries":{ "shape":"DatasetEntries", - "documentation":"

A list of \"DatasetEntry\" objects.

" + "documentation":"

A list of DatasetEntry objects.

" }, "timestamp":{ "shape":"Timestamp", @@ -1956,14 +1995,14 @@ "members":{ "tableName":{ "shape":"GlueTableName", - "documentation":"

The name of the table in your AWS Glue Data Catalog which is used to perform the ETL (extract, transform and load) operations. (An AWS Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.)

" + "documentation":"

The name of the table in your AWS Glue Data Catalog that is used to perform the ETL operations. An AWS Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.

" }, "databaseName":{ "shape":"GlueDatabaseName", - "documentation":"

The name of the database in your AWS Glue Data Catalog in which the table is located. (An AWS Glue Data Catalog database contains Glue Data tables.)

" + "documentation":"

The name of the database in your AWS Glue Data Catalog in which the table is located. An AWS Glue Data Catalog database contains metadata tables.

" } }, - "documentation":"

Configuration information for coordination with the AWS Glue ETL (extract, transform and load) service.

" + "documentation":"

Configuration information for coordination with AWS Glue, a fully managed extract, transform and load (ETL) service.

" }, "GlueDatabaseName":{ "type":"string", @@ -2010,14 +2049,14 @@ "members":{ "inputName":{ "shape":"IotEventsInputName", - "documentation":"

The name of the AWS IoT Events input to which data set contents are delivered.

" + "documentation":"

The name of the AWS IoT Events input to which dataset contents are delivered.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to deliver data set contents to an AWS IoT Events input.

" + "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to deliver dataset contents to an AWS IoT Events input.

" } }, - "documentation":"

Configuration information for delivery of data set contents to AWS IoT Events.

" + "documentation":"

Configuration information for delivery of dataset contents to AWS IoT Events.

" }, "IotEventsInputName":{ "type":"string", @@ -2035,7 +2074,7 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'lambda' activity.

" + "documentation":"

The name of the lambda activity.

" }, "lambdaName":{ "shape":"LambdaName", @@ -2043,7 +2082,7 @@ }, "batchSize":{ "shape":"ActivityBatchSize", - "documentation":"

The number of messages passed to the Lambda function for processing.

The AWS Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.

" + "documentation":"

The number of messages passed to the Lambda function for processing.

The Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.

" }, "next":{ "shape":"ActivityName", @@ -2058,6 +2097,43 @@ "min":1, "pattern":"^[a-zA-Z0-9_-]+$" }, + "LateDataRule":{ + "type":"structure", + "required":["ruleConfiguration"], + "members":{ + "ruleName":{ + "shape":"LateDataRuleName", + "documentation":"

The name of the late data rule.

" + }, + "ruleConfiguration":{ + "shape":"LateDataRuleConfiguration", + "documentation":"

The information needed to configure the late data rule.

" + } + }, + "documentation":"

A structure that contains the name and configuration information of a late data rule.

" + }, + "LateDataRuleConfiguration":{ + "type":"structure", + "members":{ + "deltaTimeSessionWindowConfiguration":{ + "shape":"DeltaTimeSessionWindowConfiguration", + "documentation":"

The information needed to configure a delta time session window.

" + } + }, + "documentation":"

The information needed to configure a delta time session window.

" + }, + "LateDataRuleName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9_]+$" + }, + "LateDataRules":{ + "type":"list", + "member":{"shape":"LateDataRule"}, + "max":1, + "min":1 + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -2089,7 +2165,7 @@ "members":{ "channelSummaries":{ "shape":"ChannelSummaries", - "documentation":"

A list of \"ChannelSummary\" objects.

" + "documentation":"

A list of ChannelSummary objects.

" }, "nextToken":{ "shape":"NextToken", @@ -2121,13 +2197,13 @@ }, "scheduledOnOrAfter":{ "shape":"Timestamp", - "documentation":"

A filter to limit results to those data set contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "documentation":"

A filter to limit results to those data set contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", "location":"querystring", "locationName":"scheduledOnOrAfter" }, "scheduledBefore":{ "shape":"Timestamp", - "documentation":"

A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "documentation":"

A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", "location":"querystring", "locationName":"scheduledBefore" } @@ -2168,7 +2244,7 @@ "members":{ "datasetSummaries":{ "shape":"DatasetSummaries", - "documentation":"

A list of \"DatasetSummary\" objects.

" + "documentation":"

A list of DatasetSummary objects.

" }, "nextToken":{ "shape":"NextToken", @@ -2198,7 +2274,7 @@ "members":{ "datastoreSummaries":{ "shape":"DatastoreSummaries", - "documentation":"

A list of \"DatastoreSummary\" objects.

" + "documentation":"

A list of DatastoreSummary objects.

" }, "nextToken":{ "shape":"NextToken", @@ -2228,7 +2304,7 @@ "members":{ "pipelineSummaries":{ "shape":"PipelineSummaries", - "documentation":"

A list of \"PipelineSummary\" objects.

" + "documentation":"

A list of PipelineSummary objects.

" }, "nextToken":{ "shape":"NextToken", @@ -2253,7 +2329,7 @@ "members":{ "tags":{ "shape":"TagList", - "documentation":"

The tags (metadata) which you have assigned to the resource.

" + "documentation":"

The tags (metadata) that you have assigned to the resource.

" } } }, @@ -2277,7 +2353,7 @@ }, "level":{ "shape":"LoggingLevel", - "documentation":"

The logging level. Currently, only \"ERROR\" is supported.

" + "documentation":"

The logging level. Currently, only ERROR is supported.

" }, "enabled":{ "shape":"LoggingEnabled", @@ -2296,7 +2372,7 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'math' activity.

" + "documentation":"

The name of the math activity.

" }, "attribute":{ "shape":"AttributeName", @@ -2342,11 +2418,11 @@ "members":{ "messageId":{ "shape":"MessageId", - "documentation":"

The ID you wish to assign to the message. Each \"messageId\" must be unique within each batch sent.

" + "documentation":"

The ID you want to assign to the message. Each messageId must be unique within each batch sent.

" }, "payload":{ "shape":"MessagePayload", - "documentation":"

The payload of the message. This may be a JSON string or a Base64-encoded string representing binary data (in which case you must decode it by means of a pipeline activity).

" + "documentation":"

The payload of the message. This can be a JSON string or a base64-encoded string representing binary data, in which case you must decode it by means of a pipeline activity.

" } }, "documentation":"

Information about a message.

" @@ -2379,7 +2455,7 @@ "members":{ "fileName":{ "shape":"OutputFileName", - "documentation":"

The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.

" + "documentation":"

The URI of the location where dataset contents are stored, usually the URI of a file in an S3 bucket.

" } }, "documentation":"

The value of the variable as a structure that specifies an output file URI.

" @@ -2461,7 +2537,7 @@ }, "deviceShadowEnrich":{ "shape":"DeviceShadowEnrichActivity", - "documentation":"

Adds information from the AWS IoT Device Shadows service to a message.

" + "documentation":"

Adds information from the AWS IoT Device Shadow service to a message.

" } }, "documentation":"

An activity that performs a transformation on a message.

" @@ -2518,7 +2594,7 @@ "documentation":"

Used to limit data to that which has arrived since the last execution of the action.

" } }, - "documentation":"

Information which is used to filter message data, to segregate it according to the time frame in which it arrives.

" + "documentation":"

Information that is used to filter message data, to segregate it according to the timeframe in which it arrives.

" }, "QueryFilters":{ "type":"list", @@ -2536,7 +2612,7 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'removeAttributes' activity.

" + "documentation":"

The name of the removeAttributes activity.

" }, "attributes":{ "shape":"AttributeNames", @@ -2568,7 +2644,7 @@ "members":{ "id":{ "shape":"ReprocessingId", - "documentation":"

The 'reprocessingId' returned by \"StartPipelineReprocessing\".

" + "documentation":"

The reprocessingId returned by StartPipelineReprocessing.

" }, "status":{ "shape":"ReprocessingStatus", @@ -2612,14 +2688,14 @@ "members":{ "computeType":{ "shape":"ComputeType", - "documentation":"

The type of the compute resource used to execute the \"containerAction\". Possible values are: ACU_1 (vCPU=4, memory=16GiB) or ACU_2 (vCPU=8, memory=32GiB).

" + "documentation":"

The type of the compute resource used to execute the containerAction. Possible values are: ACU_1 (vCPU=4, memory=16 GiB) or ACU_2 (vCPU=8, memory=32 GiB).

" }, "volumeSizeInGB":{ "shape":"VolumeSizeInGB", - "documentation":"

The size (in GB) of the persistent storage available to the resource instance used to execute the \"containerAction\" (min: 1, max: 50).

" + "documentation":"

The size, in GB, of the persistent storage available to the resource instance used to execute the containerAction (min: 1, max: 50).

" } }, - "documentation":"

The configuration of the resource used to execute the \"containerAction\".

" + "documentation":"

The configuration of the resource used to execute the containerAction.

" }, "ResourceNotFoundException":{ "type":"structure", @@ -2639,7 +2715,7 @@ }, "numberOfDays":{ "shape":"RetentionPeriodInDays", - "documentation":"

The number of days that message data is kept. The \"unlimited\" parameter must be false.

" + "documentation":"

The number of days that message data is kept. The unlimited parameter must be false.
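For illustration only, a minimal sketch of a retention period that keeps message data for a fixed number of days; the member names unlimited and numberOfDays come from this model, and the 90-day value is an invented example:

    { "unlimited": false, "numberOfDays": 90 }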

" } }, "documentation":"

How long, in days, message data is kept.

" @@ -2662,7 +2738,7 @@ "members":{ "pipelineActivity":{ "shape":"PipelineActivity", - "documentation":"

The pipeline activity that is run. This must not be a 'channel' activity or a 'datastore' activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a 'lambda' activity is specified, only short-running Lambda functions (those with a timeout of less than 30 seconds or less) can be used.

" + "documentation":"

The pipeline activity that is run. This must not be a channel activity or a datastore activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a lambda activity is specified, only short-running Lambda functions (those with a timeout of 30 seconds or less) can be used.

" }, "payloads":{ "shape":"MessagePayloads", @@ -2693,22 +2769,22 @@ "members":{ "bucket":{ "shape":"BucketName", - "documentation":"

The name of the Amazon S3 bucket to which data set contents are delivered.

" + "documentation":"

The name of the S3 bucket to which dataset contents are delivered.

" }, "key":{ "shape":"BucketKeyExpression", - "documentation":"

The key of the data set contents object. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). To produce a unique key, you can use \"!{iotanalytics:scheduledTime}\" to insert the time of the scheduled SQL query run, or \"!{iotanalytics:versioned} to insert a unique hash identifying the data set, for example: \"/DataSet/!{iotanalytics:scheduledTime}/!{iotanalytics:versioned}.csv\".

" + "documentation":"

The key of the dataset contents object in an S3 bucket. Each object has a key that is a unique identifier. Each object has exactly one key.

You can create a unique key with the following options:

  • Use !{iotanalytics:scheduleTime} to insert the time of a scheduled SQL query run.

  • Use !{iotanalytics:versionId} to insert a unique hash that identifies a dataset content.

  • Use !{iotanalytics:creationTime} to insert the creation time of a dataset content.

The following example creates a unique key for a CSV file: dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv

If you don't use !{iotanalytics:versionId} to specify the key, you might get duplicate keys. For example, you might have two dataset contents with the same scheduleTime but different versionIds. This means that one dataset content overwrites the other.
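For illustration only, a sketch of the bucket, key, and roleArn members described above, combining the documented placeholders into one key expression; the bucket name and role ARN are invented examples, and the glueConfiguration member is not shown:

    {
      "bucket": "my-analytics-results",
      "key": "dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv",
      "roleArn": "arn:aws:iam::123456789012:role/ExampleIoTAnalyticsRole"
    }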

" }, "glueConfiguration":{ "shape":"GlueConfiguration", - "documentation":"

Configuration information for coordination with the AWS Glue ETL (extract, transform and load) service.

" + "documentation":"

Configuration information for coordination with AWS Glue, a fully managed extract, transform and load (ETL) service.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 and AWS Glue resources.

" + "documentation":"

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 and AWS Glue resources.

" } }, - "documentation":"

Configuration information for delivery of data set contents to Amazon S3.

" + "documentation":"

Configuration information for delivery of dataset contents to Amazon Simple Storage Service (Amazon S3).

" }, "S3KeyPrefix":{ "type":"string", @@ -2728,7 +2804,7 @@ }, "maxMessages":{ "shape":"MaxMessages", - "documentation":"

The number of sample messages to be retrieved. The limit is 10, the default is also 10.

", + "documentation":"

The number of sample messages to be retrieved. The limit is 10. The default is also 10.

", "location":"querystring", "locationName":"maxMessages" }, @@ -2760,7 +2836,7 @@ "members":{ "expression":{ "shape":"ScheduleExpression", - "documentation":"

The expression that defines when to trigger an update. For more information, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.

" + "documentation":"

The expression that defines when to trigger an update. For more information, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.

" } }, "documentation":"

The schedule for when to trigger an update.

" @@ -2775,7 +2851,7 @@ "members":{ "name":{ "shape":"ActivityName", - "documentation":"

The name of the 'selectAttributes' activity.

" + "documentation":"

The name of the selectAttributes activity.

" }, "attributes":{ "shape":"AttributeNames", @@ -2792,25 +2868,25 @@ "type":"structure", "members":{ }, - "documentation":"

Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

" + "documentation":"

Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" }, "ServiceManagedChannelS3StorageSummary":{ "type":"structure", "members":{ }, - "documentation":"

Used to store channel data in an S3 bucket managed by the AWS IoT Analytics service.

" + "documentation":"

Used to store channel data in an S3 bucket managed by AWS IoT Analytics.

" }, "ServiceManagedDatastoreS3Storage":{ "type":"structure", "members":{ }, - "documentation":"

Use this to store data store data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" + "documentation":"

Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

" }, "ServiceManagedDatastoreS3StorageSummary":{ "type":"structure", "members":{ }, - "documentation":"

Used to store data store data in an S3 bucket managed by the AWS IoT Analytics service.

" + "documentation":"

Used to store data store data in an S3 bucket managed by AWS IoT Analytics.

" }, "ServiceUnavailableException":{ "type":"structure", @@ -2822,6 +2898,11 @@ "exception":true, "fault":true }, + "SessionTimeoutInMinutes":{ + "type":"integer", + "max":60, + "min":1 + }, "SizeInBytes":{"type":"double"}, "SqlQuery":{"type":"string"}, "SqlQueryDatasetAction":{ @@ -2834,7 +2915,7 @@ }, "filters":{ "shape":"QueryFilters", - "documentation":"

Pre-filters applied to message data.

" + "documentation":"

Prefilters applied to message data.

" } }, "documentation":"

The SQL query to modify the message.

" @@ -2890,7 +2971,7 @@ "documentation":"

The tag's value.

" } }, - "documentation":"

A set of key/value pairs which are used to manage the resource.

" + "documentation":"

A set of key-value pairs that are used to manage the resource.

" }, "TagKey":{ "type":"string", @@ -2955,10 +3036,10 @@ "members":{ "name":{ "shape":"DatasetName", - "documentation":"

The name of the data set whose content generation triggers the new data set content generation.

" + "documentation":"

The name of the dataset whose content generation triggers the new dataset content generation.

" } }, - "documentation":"

Information about the data set whose content generation triggers the new data set content generation.

" + "documentation":"

Information about the dataset whose content generation triggers the new dataset content generation.

" }, "UnlimitedRetentionPeriod":{"type":"boolean"}, "UnlimitedVersioning":{"type":"boolean"}, @@ -3000,7 +3081,7 @@ }, "channelStorage":{ "shape":"ChannelStorage", - "documentation":"

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

" + "documentation":"

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", @@ -3023,23 +3104,27 @@ }, "actions":{ "shape":"DatasetActions", - "documentation":"

A list of \"DatasetAction\" objects.

" + "documentation":"

A list of DatasetAction objects.

" }, "triggers":{ "shape":"DatasetTriggers", - "documentation":"

A list of \"DatasetTrigger\" objects. The list can be empty or can contain up to five DataSetTrigger objects.

" + "documentation":"

A list of DatasetTrigger objects. The list can be empty or can contain up to five DatasetTrigger objects.

" }, "contentDeliveryRules":{ "shape":"DatasetContentDeliveryRules", - "documentation":"

When data set contents are created they are delivered to destinations specified here.

" + "documentation":"

When dataset contents are created, they are delivered to destinations specified here.

" }, "retentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

How long, in days, data set contents are kept for the data set.

" + "documentation":"

How long, in days, dataset contents are kept for the dataset.

" }, "versioningConfiguration":{ "shape":"VersioningConfiguration", - "documentation":"

[Optional] How many versions of data set contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the \"retentionPeriod\" parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

" + "documentation":"

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" + }, + "lateDataRules":{ + "shape":"LateDataRules", + "documentation":"

A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.
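For illustration only, a sketch of what a lateDataRules entry might look like in a dataset request body. The ruleName, ruleConfiguration, and deltaTimeSessionWindowConfiguration names come from this model; the timeoutInMinutes field inside the session window is an assumption (its members are not shown in this hunk), and the rule name is an invented example:

    "lateDataRules": [
      {
        "ruleName": "exampleLateDataRule",
        "ruleConfiguration": {
          "deltaTimeSessionWindowConfiguration": {
            "timeoutInMinutes": 60
          }
        }
      }
    ]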

" } } }, @@ -3059,7 +3144,7 @@ }, "datastoreStorage":{ "shape":"DatastoreStorage", - "documentation":"

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

" + "documentation":"

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" } } }, @@ -3078,7 +3163,7 @@ }, "pipelineActivities":{ "shape":"PipelineActivities", - "documentation":"

A list of \"PipelineActivity\" objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity, for example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" + "documentation":"

A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" } } }, @@ -3101,14 +3186,14 @@ }, "datasetContentVersionValue":{ "shape":"DatasetContentVersionValue", - "documentation":"

The value of the variable as a structure that specifies a data set content version.

" + "documentation":"

The value of the variable as a structure that specifies a dataset content version.

" }, "outputFileUriValue":{ "shape":"OutputFileUriValue", "documentation":"

The value of the variable as a structure that specifies an output file URI.

" } }, - "documentation":"

An instance of a variable to be passed to the \"containerAction\" execution. Each variable must have a name and a value given by one of \"stringValue\", \"datasetContentVersionValue\", or \"outputFileUriValue\".

" + "documentation":"

An instance of a variable to be passed to the containerAction execution. Each variable must have a name and a value given by one of stringValue, datasetContentVersionValue, or outputFileUriValue.

" }, "VariableName":{ "type":"string", @@ -3126,14 +3211,14 @@ "members":{ "unlimited":{ "shape":"UnlimitedVersioning", - "documentation":"

If true, unlimited versions of data set contents will be kept.

" + "documentation":"

If true, unlimited versions of dataset contents are kept.

" }, "maxVersions":{ "shape":"MaxVersions", - "documentation":"

How many versions of data set contents will be kept. The \"unlimited\" parameter must be false.

" + "documentation":"

How many versions of dataset contents are kept. The unlimited parameter must be false.

" } }, - "documentation":"

Information about the versioning of data set contents.

" + "documentation":"

Information about the versioning of dataset contents.

" }, "VolumeSizeInGB":{ "type":"integer", diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index c778eae1fa83..bd7746cfb3ad 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index bdc0be79888e..7c92f0cb6543 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index 662398a4fa20..b3ce1217ce82 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index d377c1141c99..cb1a575d50ba 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 9ff6865d030b..f420a0916e9f 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsecuretunneling/src/main/resources/codegen-resources/service-2.json b/services/iotsecuretunneling/src/main/resources/codegen-resources/service-2.json index effc6f03149e..622d36f63a77 100644 --- a/services/iotsecuretunneling/src/main/resources/codegen-resources/service-2.json +++ b/services/iotsecuretunneling/src/main/resources/codegen-resources/service-2.json @@ -73,7 +73,7 @@ "errors":[ {"shape":"LimitExceededException"} ], - "documentation":"

Creates a new tunnel, and returns two client access tokens for clients to use to connect to the AWS IoT Secure Tunneling proxy server. .

" + "documentation":"

Creates a new tunnel, and returns two client access tokens for clients to use to connect to the AWS IoT Secure Tunneling proxy server.

" }, "TagResource":{ "name":"TagResource", @@ -180,10 +180,7 @@ }, "DestinationConfig":{ "type":"structure", - "required":[ - "thingName", - "services" - ], + "required":["services"], "members":{ "thingName":{ "shape":"ThingName", @@ -191,7 +188,7 @@ }, "services":{ "shape":"ServiceList", - "documentation":"

A list of service names that identity the target application. Currently, you can only specify a single name. The AWS IoT client running on the destination device reads this value and uses it to look up a port or an IP address and a port. The AWS IoT client instantiates the local proxy which uses this information to connect to the destination application.

" + "documentation":"

A list of service names that identify the target application. The AWS IoT client running on the destination device reads this value and uses it to look up a port or an IP address and a port. The AWS IoT client instantiates the local proxy, which uses this information to connect to the destination application.

" } }, "documentation":"

The destination configuration.

" @@ -316,14 +313,13 @@ }, "Service":{ "type":"string", - "max":8, + "max":128, "min":1, "pattern":"[a-zA-Z0-9:_-]+" }, "ServiceList":{ "type":"list", "member":{"shape":"Service"}, - "max":1, "min":1 }, "Tag":{ @@ -535,5 +531,5 @@ } } }, - "documentation":"AWS IoT Secure Tunneling

AWS IoT Secure Tunnling enables you to create remote connections to devices deployed in the field.

For more information about how AWS IoT Secure Tunneling works, see the User Guide.

" + "documentation":"AWS IoT Secure Tunneling

AWS IoT Secure Tunneling enables you to create remote connections to devices deployed in the field.

For more information about how AWS IoT Secure Tunneling works, see AWS IoT Secure Tunneling.

" } diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 2e932d0f13b0..5058b6e9680e 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotsitewise/src/main/resources/codegen-resources/service-2.json b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json index e2ee16324326..995f540b24b9 100644 --- a/services/iotsitewise/src/main/resources/codegen-resources/service-2.json +++ b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json @@ -219,7 +219,7 @@ {"shape":"InternalFailureException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains a session token that lets the IAM user access the portal.

", + "documentation":"

Creates a pre-signed URL to a portal. Use this operation to create URLs to portals that use AWS Identity and Access Management (IAM) to authenticate users. An IAM user with access to a portal can call this API to get a URL to that portal. The URL contains an authentication token that lets the IAM user access the portal.

", "endpoint":{"hostPrefix":"monitor."} }, "CreateProject":{ @@ -455,6 +455,21 @@ "documentation":"

Retrieves information about a dashboard.

", "endpoint":{"hostPrefix":"monitor."} }, + "DescribeDefaultEncryptionConfiguration":{ + "name":"DescribeDefaultEncryptionConfiguration", + "http":{ + "method":"GET", + "requestUri":"/configuration/account/encryption" + }, + "input":{"shape":"DescribeDefaultEncryptionConfigurationRequest"}, + "output":{"shape":"DescribeDefaultEncryptionConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about the default encryption configuration for the AWS account in the default or specified region. For more information, see Key management in the AWS IoT SiteWise User Guide.

" + }, "DescribeGateway":{ "name":"DescribeGateway", "http":{ @@ -780,6 +795,23 @@ ], "documentation":"

Retrieves the list of tags for an AWS IoT SiteWise resource.

" }, + "PutDefaultEncryptionConfiguration":{ + "name":"PutDefaultEncryptionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/configuration/account/encryption" + }, + "input":{"shape":"PutDefaultEncryptionConfigurationRequest"}, + "output":{"shape":"PutDefaultEncryptionConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Sets the default encryption configuration for the AWS account. For more information, see Key management in the AWS IoT SiteWise User Guide.

" + }, "PutLoggingOptions":{ "name":"PutLoggingOptions", "http":{ @@ -1795,6 +1827,47 @@ "min":36, "pattern":"\\S{36,64}" }, + "ConfigurationErrorDetails":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{ + "shape":"ErrorCode", + "documentation":"

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

" + } + }, + "documentation":"

" + }, + "ConfigurationState":{ + "type":"string", + "enum":[ + "ACTIVE", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" + ] + }, + "ConfigurationStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"ConfigurationState", + "documentation":"

" + }, + "error":{ + "shape":"ConfigurationErrorDetails", + "documentation":"

" + } + }, + "documentation":"

" + }, "ConflictingOperationException":{ "type":"structure", "required":[ @@ -2143,7 +2216,7 @@ }, "sessionDurationSeconds":{ "shape":"SessionDurationSeconds", - "documentation":"

The duration (in seconds) for which the session at the URL is valid.

Default: 900 seconds (15 minutes)

", + "documentation":"

The duration (in seconds) for which the session at the URL is valid.

Default: 43,200 seconds (12 hours)

", "location":"querystring", "locationName":"sessionDurationSeconds" } @@ -2155,7 +2228,7 @@ "members":{ "presignedPortalUrl":{ "shape":"Url", - "documentation":"

The pre-signed URL to the portal. The URL contains the portal ID and a session token that lets you access the portal. The URL has the following format.

https://<portal-id>.app.iotsitewise.aws/auth?token=<encrypted-token>

" + "documentation":"

The pre-signed URL to the portal. The URL contains the portal ID and an authentication token that lets you access the portal. The URL has the following format.

https://<portal-id>.app.iotsitewise.aws/iam?token=<encrypted-token>

" } } }, @@ -2711,6 +2784,32 @@ } } }, + "DescribeDefaultEncryptionConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeDefaultEncryptionConfigurationResponse":{ + "type":"structure", + "required":[ + "encryptionType", + "configurationStatus" + ], + "members":{ + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

The type of encryption used for the encryption configuration.

" + }, + "kmsKeyArn":{ + "shape":"ARN", + "documentation":"

The key ARN of the customer managed customer master key (CMK) used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION.

" + }, + "configurationStatus":{ + "shape":"ConfigurationStatus", + "documentation":"

The status of the account configuration. This contains the ConfigurationState. If there's an error, it also contains the ErrorDetails.

" + } + } + }, "DescribeGatewayCapabilityConfigurationRequest":{ "type":"structure", "required":[ @@ -3001,6 +3100,13 @@ "min":1, "pattern":"[^@]+@[^@]+" }, + "EncryptionType":{ + "type":"string", + "enum":[ + "SITEWISE_DEFAULT_ENCRYPTION", + "KMS_BASED_ENCRYPTION" + ] + }, "EntryId":{ "type":"string", "max":64, @@ -3037,8 +3143,7 @@ "Expression":{ "type":"string", "max":1024, - "min":1, - "pattern":"^[a-z0-9._+\\-*%/^, ()]+$" + "min":1 }, "ExpressionVariable":{ "type":"structure", @@ -3487,6 +3592,11 @@ "error":{"httpStatusCode":400}, "exception":true }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, "LimitExceededException":{ "type":"structure", "required":["message"], @@ -4283,6 +4393,41 @@ }, "documentation":"

Contains a list of value updates for an asset property in the list of asset entries consumed by the BatchPutAssetPropertyValue API operation.

" }, + "PutDefaultEncryptionConfigurationRequest":{ + "type":"structure", + "required":["encryptionType"], + "members":{ + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

The type of encryption used for the encryption configuration.

" + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The Key ID of the customer managed customer master key (CMK) used for AWS KMS encryption. This is required if you use KMS_BASED_ENCRYPTION.
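For illustration only, a sketch of a PutDefaultEncryptionConfiguration request body (POST /configuration/account/encryption) that switches the account to a customer managed CMK; the key ARN is an invented placeholder:

    {
      "encryptionType": "KMS_BASED_ENCRYPTION",
      "kmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"
    }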

" + } + } + }, + "PutDefaultEncryptionConfigurationResponse":{ + "type":"structure", + "required":[ + "encryptionType", + "configurationStatus" + ], + "members":{ + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

The type of encryption used for the encryption configuration.

" + }, + "kmsKeyArn":{ + "shape":"ARN", + "documentation":"

The Key ARN of the AWS KMS CMK used for AWS KMS encryption if you use KMS_BASED_ENCRYPTION.

" + }, + "configurationStatus":{ + "shape":"ConfigurationStatus", + "documentation":"

The status of the account configuration. This contains the ConfigurationState. If there is an error, it also contains the ErrorDetails.

" + } + } + }, "PutLoggingOptionsRequest":{ "type":"structure", "required":["loggingOptions"], diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index d63ab356406c..195da05dd925 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index a1e46a765d21..11779f003c13 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 2880e5ec70b2..7b4ded90f666 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafka/src/main/resources/codegen-resources/service-2.json b/services/kafka/src/main/resources/codegen-resources/service-2.json index 1a15820db076..0354971783f8 100644 --- a/services/kafka/src/main/resources/codegen-resources/service-2.json +++ b/services/kafka/src/main/resources/codegen-resources/service-2.json @@ -1392,7 +1392,7 @@ "EnhancedMonitoring": { "shape": "EnhancedMonitoring", "locationName": "enhancedMonitoring", - "documentation": "\n

Specifies which metrics are gathered for the MSK cluster. This property has three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. For a list of the metrics associated with each of these three levels of monitoring, see Monitoring.

\n " + "documentation": "\n

Specifies which metrics are gathered for the MSK cluster. This property has the following possible values: DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION. For a list of the metrics associated with each of these levels of monitoring, see Monitoring.

\n " }, "OpenMonitoring" : { "shape" : "OpenMonitoring", @@ -1411,7 +1411,7 @@ "State": { "shape": "ClusterState", "locationName": "state", - "documentation": "\n

The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.

\n " + "documentation": "\n

The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, MAINTENANCE, REBOOTING_BROKER, and UPDATING.

\n " }, "StateInfo" : { "shape" : "StateInfo", @@ -1529,9 +1529,11 @@ "enum": [ "ACTIVE", "CREATING", - "UPDATING", "DELETING", - "FAILED" + "FAILED", + "MAINTENANCE", + "REBOOTING_BROKER", + "UPDATING" ] }, "CompatibleKafkaVersion" : { @@ -1705,7 +1707,7 @@ "EnhancedMonitoring": { "shape": "EnhancedMonitoring", "locationName": "enhancedMonitoring", - "documentation": "\n

Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER.

\n " + "documentation": "\n

Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.

\n " }, "OpenMonitoring" : { "shape" : "OpenMonitoringInfo", @@ -1755,7 +1757,7 @@ "State": { "shape": "ClusterState", "locationName": "state", - "documentation": "\n

The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.

\n " + "documentation": "\n

The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, MAINTENANCE, REBOOTING_BROKER, and UPDATING.

\n " } } }, @@ -1849,7 +1851,7 @@ "State": { "shape": "ClusterState", "locationName": "state", - "documentation": "\n

The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED.

\n " + "documentation": "\n

The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, MAINTENANCE, REBOOTING_BROKER, and UPDATING.

\n " } } }, @@ -2127,11 +2129,12 @@ }, "EnhancedMonitoring": { "type": "string", - "documentation": "\n

Specifies which metrics are gathered for the MSK cluster. This property has three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. For a list of the metrics associated with each of these three levels of monitoring, see Monitoring.

\n ", + "documentation": "\n

Specifies which metrics are gathered for the MSK cluster. This property has the following possible values: DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION. For a list of the metrics associated with each of these levels of monitoring, see Monitoring.

\n ", "enum": [ "DEFAULT", "PER_BROKER", - "PER_TOPIC_PER_BROKER" + "PER_TOPIC_PER_BROKER", + "PER_TOPIC_PER_PARTITION" ] }, "Error": { diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index d48f6ed1b877..eb2e0756c10a 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendra/src/main/resources/codegen-resources/service-2.json b/services/kendra/src/main/resources/codegen-resources/service-2.json index 35ae53944ceb..99f0a4dbb1b8 100644 --- a/services/kendra/src/main/resources/codegen-resources/service-2.json +++ b/services/kendra/src/main/resources/codegen-resources/service-2.json @@ -690,6 +690,12 @@ "max":5, "min":1 }, + "ClaimRegex":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\P{C}*$" + }, "ClickFeedback":{ "type":"structure", "required":[ @@ -762,6 +768,287 @@ "documentation":"

", "exception":true }, + "ConfluenceAttachmentConfiguration":{ + "type":"structure", + "members":{ + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

Indicates whether Amazon Kendra indexes attachments to the pages and blogs in the Confluence data source.

" + }, + "AttachmentFieldMappings":{ + "shape":"ConfluenceAttachmentFieldMappingsList", + "documentation":"

Defines how attachment metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

If you specify the AttachmentFieldMappings parameter, you must specify at least one field mapping.

" + } + }, + "documentation":"

Specifies the attachment settings for the Confluence data source. Attachment settings are optional. If you don't specify attachment settings, Amazon Kendra won't index attachments.

" + }, + "ConfluenceAttachmentFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluenceAttachmentToIndexFieldMapping"}, + "max":11, + "min":1 + }, + "ConfluenceAttachmentFieldName":{ + "type":"string", + "enum":[ + "AUTHOR", + "CONTENT_TYPE", + "CREATED_DATE", + "DISPLAY_URL", + "FILE_SIZE", + "ITEM_TYPE", + "PARENT_ID", + "SPACE_KEY", + "SPACE_NAME", + "URL", + "VERSION" + ] + }, + "ConfluenceAttachmentToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluenceAttachmentFieldName", + "documentation":"

The name of the field in the data source.

You must first create the index field using the operation.

" + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field, you must specify the date format. If the field is not a date field, an exception is thrown.

" + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

" + } + }, + "documentation":"

Defines the mapping between a field in the Confluence data source and an Amazon Kendra index field.

You must first create the index field using the operation.

" + }, + "ConfluenceBlogConfiguration":{ + "type":"structure", + "members":{ + "BlogFieldMappings":{ + "shape":"ConfluenceBlogFieldMappingsList", + "documentation":"

Defines how blog metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

If you specify the BlogFieldMappings parameter, you must specify at least one field mapping.

" + } + }, + "documentation":"

Specifies the blog settings for the Confluence data source. Blogs are always indexed unless filtered from the index by the ExclusionPatterns or InclusionPatterns fields in the data type.

" + }, + "ConfluenceBlogFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluenceBlogToIndexFieldMapping"}, + "max":9, + "min":1 + }, + "ConfluenceBlogFieldName":{ + "type":"string", + "enum":[ + "AUTHOR", + "DISPLAY_URL", + "ITEM_TYPE", + "LABELS", + "PUBLISH_DATE", + "SPACE_KEY", + "SPACE_NAME", + "URL", + "VERSION" + ] + }, + "ConfluenceBlogToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluenceBlogFieldName", + "documentation":"

The name of the field in the data source.

" + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field, you must specify the date format. If the field is not a date field, an exception is thrown.

" + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

" + } + }, + "documentation":"

Defines the mapping between a blog field in the Confluence data source and an Amazon Kendra index field.

You must first create the index field using the operation.

" + }, + "ConfluenceConfiguration":{ + "type":"structure", + "required":[ + "ServerUrl", + "SecretArn", + "Version" + ], + "members":{ + "ServerUrl":{ + "shape":"Url", + "documentation":"

The URL of your Confluence instance. Use the full URL of the server. For example, https://server.example.com:port/. You can also use an IP address, for example, https://192.168.1.113/.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that contains the key/value pairs required to connect to your Confluence server. The secret must contain a JSON structure with the following keys:

  • username - The user name or email address of a user with administrative privileges for the Confluence server.

  • password - The password associated with the user logging in to the Confluence server.
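For illustration only, the Secrets Manager secret referenced by SecretArn might store a JSON value like the following; both values are invented placeholders:

    {
      "username": "confluence-admin@example.com",
      "password": "example-password"
    }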

" + }, + "Version":{ + "shape":"ConfluenceVersion", + "documentation":"

Specifies the version of the Confluence installation that you are connecting to.

" + }, + "SpaceConfiguration":{ + "shape":"ConfluenceSpaceConfiguration", + "documentation":"

Specifies configuration information for indexing Confluence spaces.

" + }, + "PageConfiguration":{ + "shape":"ConfluencePageConfiguration", + "documentation":"

Specifies configuration information for indexing Confluence pages.

" + }, + "BlogConfiguration":{ + "shape":"ConfluenceBlogConfiguration", + "documentation":"

Specifies configuration information for indexing Confluence blogs.

" + }, + "AttachmentConfiguration":{ + "shape":"ConfluenceAttachmentConfiguration", + "documentation":"

Specifies configuration information for indexing attachments to Confluence blogs and pages.

" + }, + "VpcConfiguration":{ + "shape":"DataSourceVpcConfiguration", + "documentation":"

Specifies the information for connecting to an Amazon VPC.

" + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of regular expression patterns that apply to a URL on the Confluence server. An inclusion pattern can apply to a blog post, a page, a space, or an attachment. Items that match the patterns are included in the index. Items that don't match the pattern are excluded from the index. If an item matches both an inclusion pattern and an exclusion pattern, the item isn't included in the index.

" + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of regular expression patterns that apply to a URL on the Confluence server. An exclusion pattern can apply to a blog post, a page, a space, or an attachment. Items that match the pattern are excluded from the index. Items that don't match the pattern are included in the index. If an item matches both an exclusion pattern and an inclusion pattern, the item isn't included in the index.

" + } + }, + "documentation":"

Provides configuration information for data sources that connect to Confluence.
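For illustration only, a minimal sketch of a ConfluenceConfiguration that supplies just the required members (ServerUrl, SecretArn, and Version); the URL and secret ARN are invented placeholders, and Version uses the SERVER value from the ConfluenceVersion enum:

    {
      "ServerUrl": "https://confluence.example.com:8443/",
      "SecretArn": "arn:aws:secretsmanager:us-west-2:123456789012:secret:confluence-credentials",
      "Version": "SERVER"
    }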

" + }, + "ConfluencePageConfiguration":{ + "type":"structure", + "members":{ + "PageFieldMappings":{ + "shape":"ConfluencePageFieldMappingsList", + "documentation":"

Defines how page metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

If you specify the PageFieldMappings parameter, you must specify at least one field mapping.

" + } + }, + "documentation":"

Specifies the page settings for the Confluence data source.

" + }, + "ConfluencePageFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluencePageToIndexFieldMapping"}, + "max":12, + "min":1 + }, + "ConfluencePageFieldName":{ + "type":"string", + "enum":[ + "AUTHOR", + "CONTENT_STATUS", + "CREATED_DATE", + "DISPLAY_URL", + "ITEM_TYPE", + "LABELS", + "MODIFIED_DATE", + "PARENT_ID", + "SPACE_KEY", + "SPACE_NAME", + "URL", + "VERSION" + ] + }, + "ConfluencePageToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluencePageFieldName", + "documentation":"

The name of the field in the data source.

" + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field, you must specify the date format. If the field is not a date field, an exception is thrown.

" + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

" + } + }, + "documentation":"

Defines the mapping between a field in the Confluence data source and an Amazon Kendra index field.

You must first create the index field using the operation.

" + }, + "ConfluenceSpaceConfiguration":{ + "type":"structure", + "members":{ + "CrawlPersonalSpaces":{ + "shape":"Boolean", + "documentation":"

Specifies whether Amazon Kendra should index personal spaces. Users can add restrictions to items in personal spaces. If personal spaces are indexed, queries without user context information may return restricted items from a personal space in their results. For more information, see Filtering on user context.

" + }, + "CrawlArchivedSpaces":{ + "shape":"Boolean", + "documentation":"

Specifies whether Amazon Kendra should index archived spaces.

" + }, + "IncludeSpaces":{ + "shape":"ConfluenceSpaceList", + "documentation":"

A list of space keys for Confluence spaces. If you include a key, the blogs, documents, and attachments in the space are indexed. Spaces that aren't in the list aren't indexed. A space in the list must exist. Otherwise, Amazon Kendra logs an error when the data source is synchronized. If a space is in both the IncludeSpaces and the ExcludeSpaces list, the space is excluded.

" + }, + "ExcludeSpaces":{ + "shape":"ConfluenceSpaceList", + "documentation":"

A list of space keys of Confluence spaces. If you include a key, the blogs, documents, and attachments in the space are not indexed. If a space is in both the ExcludeSpaces and the IncludeSpaces list, the space is excluded.

" + }, + "SpaceFieldMappings":{ + "shape":"ConfluenceSpaceFieldMappingsList", + "documentation":"

Defines how space metadata fields should be mapped to index fields. Before you can map a field, you must first create an index field with a matching type using the console or the UpdateIndex operation.

If you specify the SpaceFieldMappings parameter, you must specify at least one field mapping.

" + } + }, + "documentation":"

Specifies the configuration for indexing Confluence spaces.

" + }, + "ConfluenceSpaceFieldMappingsList":{ + "type":"list", + "member":{"shape":"ConfluenceSpaceToIndexFieldMapping"}, + "max":4, + "min":1 + }, + "ConfluenceSpaceFieldName":{ + "type":"string", + "enum":[ + "DISPLAY_URL", + "ITEM_TYPE", + "SPACE_KEY", + "URL" + ] + }, + "ConfluenceSpaceIdentifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^\\P{C}*$" + }, + "ConfluenceSpaceList":{ + "type":"list", + "member":{"shape":"ConfluenceSpaceIdentifier"}, + "min":1 + }, + "ConfluenceSpaceToIndexFieldMapping":{ + "type":"structure", + "members":{ + "DataSourceFieldName":{ + "shape":"ConfluenceSpaceFieldName", + "documentation":"

The name of the field in the data source.

" + }, + "DateFieldFormat":{ + "shape":"DataSourceDateFieldFormat", + "documentation":"

The format for date fields in the data source. If the field specified in DataSourceFieldName is a date field, you must specify the date format. If the field is not a date field, an exception is thrown.

" + }, + "IndexFieldName":{ + "shape":"IndexFieldName", + "documentation":"

The name of the index field to map to the Confluence data source field. The index field type must match the Confluence field type.

" + } + }, + "documentation":"

Defines the mapping between a field in the Confluence data source to a Amazon Kendra index field.

You must first create the index field using the operation.

" + }, + "ConfluenceVersion":{ + "type":"string", + "enum":[ + "CLOUD", + "SERVER" + ] + }, "ConnectionConfiguration":{ "type":"structure", "required":[ @@ -810,9 +1097,7 @@ "required":[ "Name", "IndexId", - "Type", - "Configuration", - "RoleArn" + "Type" ], "members":{ "Name":{ @@ -829,7 +1114,7 @@ }, "Configuration":{ "shape":"DataSourceConfiguration", - "documentation":"

The data source connector configuration information that is required to access the repository.

" + "documentation":"

The connector configuration information that is required to access the repository.

You can't specify the Configuration parameter when the Type parameter is set to CUSTOM. If you do, you receive a ValidationException exception.

The Configuration parameter is required for all other data sources.

" }, "Description":{ "shape":"Description", @@ -837,15 +1122,20 @@ }, "Schedule":{ "shape":"ScanSchedule", - "documentation":"

Sets the frequency that Amazon Kendra will check the documents in your repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob operation to update the index.

" + "documentation":"

Sets the frequency that Amazon Kendra will check the documents in your repository and update the index. If you don't set a schedule Amazon Kendra will not periodically update the index. You can call the StartDataSourceSyncJob operation to update the index.

You can't specify the Schedule parameter when the Type parameter is set to CUSTOM. If you do, you receive a ValidationException exception.

" }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source. For more information, see IAM Roles for Amazon Kendra.

" + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source. For more information, see IAM Roles for Amazon Kendra.

You can't specify the RoleArn parameter when the Type parameter is set to CUSTOM. If you do, you receive a ValidationException exception.

The RoleArn parameter is required for all other data sources.

" }, "Tags":{ "shape":"TagList", "documentation":"

A list of key-value pairs that identify the data source. You can use the tags to identify and organize your resources and to control access to resources.

" + }, + "ClientToken":{ + "shape":"ClientTokenName", + "documentation":"

A token that you provide to identify the request to create a data source. Multiple calls to the CreateDataSource operation with the same client token will create only one data source.
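As an illustration of the new ClientToken member and the CUSTOM-type restrictions above, here is a minimal sketch against the Kendra client the Java SDK v2 generates from this model. The index ID, names, and token value are placeholders, and the builder method names assume the usual v2 codegen conventions.

```java
import software.amazon.awssdk.services.kendra.KendraClient;
import software.amazon.awssdk.services.kendra.model.CreateDataSourceResponse;
import software.amazon.awssdk.services.kendra.model.DataSourceType;

public class CreateCustomDataSourceSketch {
    public static void main(String[] args) {
        try (KendraClient kendra = KendraClient.create()) {
            CreateDataSourceResponse ds = kendra.createDataSource(b -> b
                    .indexId("11111111-2222-3333-4444-555555555555")  // placeholder index ID
                    .name("crm-export")
                    .type(DataSourceType.CUSTOM)             // Configuration, Schedule, and RoleArn must be omitted for CUSTOM
                    .description("Documents pushed with BatchPutDocument")
                    .clientToken("create-crm-export-001"));  // retries with the same token create only one data source
            System.out.println("Created data source " + ds.id());
        }
    }
}
```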

", + "idempotencyToken":true } } }, @@ -895,6 +1185,11 @@ "FileFormat":{ "shape":"FaqFileFormat", "documentation":"

The format of the input file. You can choose from a basic CSV format, a CSV format that includes custom attributes in a header, and a JSON format that includes custom attributes.

The format must match the format of the file stored in the S3 bucket identified in the S3Path parameter.

For more information, see Adding questions and answers.

" + }, + "ClientToken":{ + "shape":"ClientTokenName", + "documentation":"

A token that you provide to identify the request to create a FAQ. Multiple calls to the CreateFaq operation with the same client token will create only one FAQ.

", + "idempotencyToken":true } } }, @@ -924,7 +1219,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

An IAM role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role used when you use the BatchPutDocument operation to index documents from an Amazon S3 bucket.

" + "documentation":"

An AWS Identity and Access Management (IAM) role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role used when you use the BatchPutDocument operation to index documents from an Amazon S3 bucket.

" }, "ServerSideEncryptionConfiguration":{ "shape":"ServerSideEncryptionConfiguration", @@ -936,12 +1231,20 @@ }, "ClientToken":{ "shape":"ClientTokenName", - "documentation":"

A token that you provide to identify the request to create an index. Multiple calls to the CreateIndex operation with the same client token will create only one index.”

", + "documentation":"

A token that you provide to identify the request to create an index. Multiple calls to the CreateIndex operation with the same client token will create only one index.

", "idempotencyToken":true }, "Tags":{ "shape":"TagList", "documentation":"

A list of key-value pairs that identify the index. You can use the tags to identify and organize your resources and to control access to resources.

" + }, + "UserTokenConfigurations":{ + "shape":"UserTokenConfigurationList", + "documentation":"

The user token configuration.

" + }, + "UserContextPolicy":{ + "shape":"UserContextPolicy", + "documentation":"

The user context policy.

ATTRIBUTE_FILTER

All indexed content is searchable and displayable for all users. If there is an access control list, it is ignored. You can filter on user and group attributes.

USER_TOKEN

Enables SSO and token-based user access control. All documents with no access control and all documents accessible to the user will be searchable and displayable.
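To show how the new UserContextPolicy and UserTokenConfigurations members fit together at index creation, here is a hedged sketch using the generated Kendra client. The role ARN, JWKS URL, and attribute field names are placeholders, not part of this change.

```java
import software.amazon.awssdk.services.kendra.KendraClient;
import software.amazon.awssdk.services.kendra.model.CreateIndexResponse;
import software.amazon.awssdk.services.kendra.model.JwtTokenTypeConfiguration;
import software.amazon.awssdk.services.kendra.model.KeyLocation;
import software.amazon.awssdk.services.kendra.model.UserContextPolicy;
import software.amazon.awssdk.services.kendra.model.UserTokenConfiguration;

public class CreateIndexWithUserTokenSketch {
    public static void main(String[] args) {
        try (KendraClient kendra = KendraClient.create()) {
            CreateIndexResponse index = kendra.createIndex(b -> b
                    .name("docs-index")
                    .roleArn("arn:aws:iam::123456789012:role/kendra-index-role")  // placeholder
                    .userContextPolicy(UserContextPolicy.USER_TOKEN)
                    .userTokenConfigurations(UserTokenConfiguration.builder()      // the list allows at most one entry
                            .jwtTokenTypeConfiguration(JwtTokenTypeConfiguration.builder()
                                    .keyLocation(KeyLocation.URL)
                                    .url("https://idp.example.com/.well-known/jwks.json")  // placeholder signing key URL
                                    .userNameAttributeField("preferred_username")
                                    .groupAttributeField("groups")
                                    .build())
                            .build()));
            System.out.println("Index id: " + index.id());
        }
    }
}
```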

" } } }, @@ -980,6 +1283,10 @@ "ServiceNowConfiguration":{ "shape":"ServiceNowConfiguration", "documentation":"

Provides configuration for data sources that connect to ServiceNow instances.

" + }, + "ConfluenceConfiguration":{ + "shape":"ConfluenceConfiguration", + "documentation":"

Provides configuration information for connecting to a Confluence data source.

" } }, "documentation":"

Configuration information for an Amazon Kendra data source.

" @@ -1010,7 +1317,7 @@ }, "DataSourceInclusionsExclusionsStringsMember":{ "type":"string", - "max":50, + "max":150, "min":1 }, "DataSourceName":{ @@ -1203,7 +1510,9 @@ "DATABASE", "SALESFORCE", "ONEDRIVE", - "SERVICENOW" + "SERVICENOW", + "CUSTOM", + "CONFLUENCE" ] }, "DataSourceVpcConfiguration":{ @@ -1522,13 +1831,21 @@ "CapacityUnits":{ "shape":"CapacityUnitsConfiguration", "documentation":"

For enterprise edition indexes, you can choose to use additional capacity to meet the needs of your application. This contains the capacity units used for the index. A 0 for the query capacity or the storage capacity indicates that the index is using the default capacity for the index.

" + }, + "UserTokenConfigurations":{ + "shape":"UserTokenConfigurationList", + "documentation":"

The user token configuration for the Amazon Kendra index.

" + }, + "UserContextPolicy":{ + "shape":"UserContextPolicy", + "documentation":"

The user context policy for the Amazon Kendra index.

" } } }, "Description":{ "type":"string", "max":1000, - "min":1, + "min":0, "pattern":"^\\P{C}*$" }, "Document":{ @@ -1850,6 +2167,12 @@ "type":"list", "member":{"shape":"FaqSummary"} }, + "GroupAttributeField":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\P{C}*$" + }, "Highlight":{ "type":"structure", "required":[ @@ -1996,6 +2319,72 @@ "exception":true, "fault":true }, + "Issuer":{ + "type":"string", + "max":65, + "min":1, + "pattern":"^\\P{C}*$" + }, + "JsonTokenTypeConfiguration":{ + "type":"structure", + "required":[ + "UserNameAttributeField", + "GroupAttributeField" + ], + "members":{ + "UserNameAttributeField":{ + "shape":"String", + "documentation":"

The user name attribute field.

" + }, + "GroupAttributeField":{ + "shape":"String", + "documentation":"

The group attribute field.

" + } + }, + "documentation":"

Configuration information for the JSON token type.

" + }, + "JwtTokenTypeConfiguration":{ + "type":"structure", + "required":["KeyLocation"], + "members":{ + "KeyLocation":{ + "shape":"KeyLocation", + "documentation":"

The location of the key.

" + }, + "URL":{ + "shape":"Url", + "documentation":"

The signing key URL.

" + }, + "SecretManagerArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the secret.

" + }, + "UserNameAttributeField":{ + "shape":"UserNameAttributeField", + "documentation":"

The user name attribute field.

" + }, + "GroupAttributeField":{ + "shape":"GroupAttributeField", + "documentation":"

The group attribute field.

" + }, + "Issuer":{ + "shape":"Issuer", + "documentation":"

The issuer of the token.

" + }, + "ClaimRegex":{ + "shape":"ClaimRegex", + "documentation":"

The regular expression that identifies the claim.

" + } + }, + "documentation":"

Configuration information for the JWT token type.

" + }, + "KeyLocation":{ + "type":"string", + "enum":[ + "URL", + "SECRET_MANAGER" + ] + }, "KmsKeyId":{ "type":"string", "max":2048, @@ -2216,6 +2605,10 @@ "FieldMappings":{ "shape":"DataSourceToIndexFieldMappingList", "documentation":"

A list of DataSourceToIndexFieldMapping objects that map Microsoft OneDrive fields to custom fields in the Amazon Kendra index. You must first create the index fields before you map OneDrive fields.

" + }, + "DisableLocalGroups":{ + "shape":"Boolean", + "documentation":"

A Boolean value that specifies whether local groups are disabled (True) or enabled (False).

" } }, "documentation":"

Provides configuration information for data sources that connect to OneDrive.

" @@ -2351,6 +2744,10 @@ "SortingConfiguration":{ "shape":"SortingConfiguration", "documentation":"

Provides information that determines how the results of the query are sorted. You can set the field that Amazon Kendra should sort the results on, and specify whether the results should be sorted in ascending or descending order. In the case of ties in sorting the results, the results are sorted by relevance.

If you don't provide sorting configuration, the results are sorted by the relevance that Amazon Kendra determines for the result.

" + }, + "UserContext":{ + "shape":"UserContext", + "documentation":"

The user context token.

" } } }, @@ -2558,9 +2955,13 @@ "shape":"DataSourceInclusionsExclusionsStrings", "documentation":"

A list of S3 prefixes for the documents that should be included in the index.

" }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of glob patterns for documents that should be indexed. If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed.

For more information about glob patterns, see glob (programming) in Wikipedia.

" + }, "ExclusionPatterns":{ "shape":"DataSourceInclusionsExclusionsStrings", - "documentation":"

A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix also matches an exclusion pattern, the document is not indexed.

For more information about glob patterns, see glob (programming) in Wikipedia.

" + "documentation":"

A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed.

For more information about glob patterns, see glob (programming) in Wikipedia.
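Since the new InclusionPatterns member interacts with the existing ExclusionPatterns (exclusion wins), a short sketch of an S3 data source using both may help. Bucket, index ID, and role ARN are placeholders; the member name s3Configuration is assumed from the usual codegen mapping of the model.

```java
import software.amazon.awssdk.services.kendra.KendraClient;
import software.amazon.awssdk.services.kendra.model.DataSourceConfiguration;
import software.amazon.awssdk.services.kendra.model.DataSourceType;
import software.amazon.awssdk.services.kendra.model.S3DataSourceConfiguration;

public class S3GlobPatternsSketch {
    public static void main(String[] args) {
        try (KendraClient kendra = KendraClient.create()) {
            kendra.createDataSource(b -> b
                    .indexId("11111111-2222-3333-4444-555555555555")           // placeholder
                    .name("policy-bucket")
                    .type(DataSourceType.S3)
                    .roleArn("arn:aws:iam::123456789012:role/kendra-s3-role")  // placeholder
                    .configuration(DataSourceConfiguration.builder()
                            .s3Configuration(S3DataSourceConfiguration.builder()
                                    .bucketName("example-policy-bucket")
                                    .inclusionPatterns("**/*.pdf", "**/*.docx")  // index only documents matching these globs
                                    .exclusionPatterns("archive/**")             // exclusion wins over inclusion
                                    .build())
                            .build()));
        }
    }
}
```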

" }, "DocumentsMetadataConfiguration":{"shape":"DocumentsMetadataConfiguration"}, "AccessControlListConfiguration":{ @@ -3056,6 +3457,10 @@ "DocumentTitleFieldName":{ "shape":"DataSourceFieldName", "documentation":"

The Microsoft SharePoint attribute field that contains the title of the document.

" + }, + "DisableLocalGroups":{ + "shape":"Boolean", + "documentation":"

A Boolean value that specifies whether local groups are disabled (True) or enabled (False).

" } }, "documentation":"

Provides configuration information for connecting to a Microsoft SharePoint data source.

" @@ -3324,6 +3729,12 @@ }, "Timestamp":{"type":"timestamp"}, "Title":{"type":"string"}, + "Token":{ + "type":"string", + "max":100000, + "min":1, + "pattern":"^\\P{C}*$" + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -3407,6 +3818,14 @@ "CapacityUnits":{ "shape":"CapacityUnitsConfiguration", "documentation":"

Sets the number of addtional storage and query capacity units that should be used by the index. You can change the capacity of the index up to 5 times per day.

If you are using extra storage units, you can't reduce the storage capacity below that required to meet the storage needs for your index.

" + }, + "UserTokenConfigurations":{ + "shape":"UserTokenConfigurationList", + "documentation":"

The user token configuration.

" + }, + "UserContextPolicy":{ + "shape":"UserContextPolicy", + "documentation":"

The user context policy.

" } } }, @@ -3416,6 +3835,48 @@ "min":1, "pattern":"^(https?|ftp|file):\\/\\/([^\\s]*)" }, + "UserContext":{ + "type":"structure", + "members":{ + "Token":{ + "shape":"Token", + "documentation":"

The user context token. It must be a JWT or a JSON token.
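For reference, a minimal sketch of passing that token on a query with the generated client; the index ID and JWT value are placeholders.

```java
import software.amazon.awssdk.services.kendra.KendraClient;
import software.amazon.awssdk.services.kendra.model.QueryResponse;
import software.amazon.awssdk.services.kendra.model.UserContext;

public class QueryWithUserContextSketch {
    public static void main(String[] args) {
        String jwtFromIdp = "eyJhbGciOi...";  // placeholder: a token issued by your identity provider
        try (KendraClient kendra = KendraClient.create()) {
            QueryResponse results = kendra.query(b -> b
                    .indexId("11111111-2222-3333-4444-555555555555")  // placeholder
                    .queryText("expense report policy")
                    .userContext(UserContext.builder().token(jwtFromIdp).build()));
            results.resultItems().forEach(item -> System.out.println(item.documentId()));
        }
    }
}
```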

" + } + }, + "documentation":"

Provides information about the user context for an Amazon Kendra index.

" + }, + "UserContextPolicy":{ + "type":"string", + "enum":[ + "ATTRIBUTE_FILTER", + "USER_TOKEN" + ] + }, + "UserNameAttributeField":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^\\P{C}*$" + }, + "UserTokenConfiguration":{ + "type":"structure", + "members":{ + "JwtTokenTypeConfiguration":{ + "shape":"JwtTokenTypeConfiguration", + "documentation":"

Information about the JWT token type configuration.

" + }, + "JsonTokenTypeConfiguration":{ + "shape":"JsonTokenTypeConfiguration", + "documentation":"

Information about the JSON token type configuration.

" + } + }, + "documentation":"

Provides configuration information for a user token.

" + }, + "UserTokenConfigurationList":{ + "type":"list", + "member":{"shape":"UserTokenConfiguration"}, + "max":1 + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 72f65860e8c3..dd8800b2d639 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index 69fa17245ad0..5e33733b67cd 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index ffac07ddab1a..5ad4262440c1 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisanalyticsv2/src/main/resources/codegen-resources/service-2.json b/services/kinesisanalyticsv2/src/main/resources/codegen-resources/service-2.json index 1b6478ffaa4d..b8e3cdcbf284 100644 --- a/services/kinesisanalyticsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/kinesisanalyticsv2/src/main/resources/codegen-resources/service-2.json @@ -137,6 +137,21 @@ ], "documentation":"

Creates a Kinesis Data Analytics application. For information about creating a Kinesis Data Analytics application, see Creating an Application.

" }, + "CreateApplicationPresignedUrl":{ + "name":"CreateApplicationPresignedUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApplicationPresignedUrlRequest"}, + "output":{"shape":"CreateApplicationPresignedUrlResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidArgumentException"} + ], + "documentation":"

Creates and returns a URL that you can use to connect to an application's extension. Currently, the only available extension is the Apache Flink dashboard.

The IAM role or user used to call this API defines the permissions to access the extension. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request that attempts to connect to the extension.

The URL that you get from a call to CreateApplicationPresignedUrl must be used within 3 minutes to be valid. If you first try to use the URL after the 3-minute limit expires, the service returns an HTTP 403 Forbidden error.
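A hedged sketch of calling the new operation with the generated Kinesis Analytics V2 client follows; the application name is a placeholder and the session duration must fall in the 1800-43200 second range defined below.

```java
import software.amazon.awssdk.services.kinesisanalyticsv2.KinesisAnalyticsV2Client;
import software.amazon.awssdk.services.kinesisanalyticsv2.model.CreateApplicationPresignedUrlResponse;
import software.amazon.awssdk.services.kinesisanalyticsv2.model.UrlType;

public class FlinkDashboardUrlSketch {
    public static void main(String[] args) {
        try (KinesisAnalyticsV2Client kda = KinesisAnalyticsV2Client.create()) {
            CreateApplicationPresignedUrlResponse resp = kda.createApplicationPresignedUrl(b -> b
                    .applicationName("my-flink-app")              // placeholder application name
                    .urlType(UrlType.FLINK_DASHBOARD_URL)
                    .sessionExpirationDurationInSeconds(1800L));  // 30 minutes
            // The returned URL must be opened within 3 minutes, even though the session itself lasts longer.
            System.out.println(resp.authorizedUrl());
        }
    }
}
```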

" + }, "CreateApplicationSnapshot":{ "name":"CreateApplicationSnapshot", "http":{ @@ -399,7 +414,7 @@ {"shape":"InvalidApplicationConfigurationException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Stops the application from processing data. You can stop an application only if it is in the running state. You can use the DescribeApplication operation to find the application state.

" + "documentation":"

Stops the application from processing data. You can stop an application only if it is in the running status, unless you set the Force parameter to true.

You can use the DescribeApplication operation to find the application status.

Kinesis Data Analytics takes a snapshot when the application is stopped, unless Force is set to true.

" }, "TagResource":{ "name":"TagResource", @@ -1017,6 +1032,11 @@ "max":999999999, "min":1 }, + "AuthorizedUrl":{ + "type":"string", + "max":2048, + "min":1 + }, "BooleanObject":{"type":"boolean"}, "BucketARN":{ "type":"string", @@ -1268,6 +1288,36 @@ "CUSTOM" ] }, + "CreateApplicationPresignedUrlRequest":{ + "type":"structure", + "required":[ + "ApplicationName", + "UrlType" + ], + "members":{ + "ApplicationName":{ + "shape":"ApplicationName", + "documentation":"

The name of the application.

" + }, + "UrlType":{ + "shape":"UrlType", + "documentation":"

The type of the extension for which to create and return a URL. Currently, the only valid extension URL type is FLINK_DASHBOARD_URL.

" + }, + "SessionExpirationDurationInSeconds":{ + "shape":"SessionExpirationDurationInSeconds", + "documentation":"

The duration in seconds for which the returned URL will be valid.

" + } + } + }, + "CreateApplicationPresignedUrlResponse":{ + "type":"structure", + "members":{ + "AuthorizedUrl":{ + "shape":"AuthorizedUrl", + "documentation":"

The URL of the extension.

" + } + } + }, "CreateApplicationRequest":{ "type":"structure", "required":[ @@ -2982,7 +3032,8 @@ "enum":[ "SQL-1_0", "FLINK-1_6", - "FLINK-1_8" + "FLINK-1_8", + "FLINK-1_11" ] }, "S3ApplicationCodeLocationDescription":{ @@ -3131,6 +3182,11 @@ "exception":true, "fault":true }, + "SessionExpirationDurationInSeconds":{ + "type":"long", + "max":43200, + "min":1800 + }, "SnapshotDetails":{ "type":"structure", "required":[ @@ -3307,7 +3363,7 @@ }, "Force":{ "shape":"BooleanObject", - "documentation":"

Set to true to force the application to stop. If you set Force to true, Kinesis Data Analytics stops the application without taking a snapshot.

You can only force stop a Flink-based Kinesis Data Analytics application. You can't force stop a SQL-based Kinesis Data Analytics application.

The application must be in the STARTING, UPDATING, STOPPING, AUTOSCALING, or RUNNING state.

" + "documentation":"

Set to true to force the application to stop. If you set Force to true, Kinesis Data Analytics stops the application without taking a snapshot.

Force-stopping your application may lead to data loss or duplication. To prevent data loss or duplicate processing of data during application restarts, we recommend you to take frequent snapshots of your application.

You can only force stop a Flink-based Kinesis Data Analytics application. You can't force stop a SQL-based Kinesis Data Analytics application.

The application must be in the STARTING, UPDATING, STOPPING, AUTOSCALING, or RUNNING status.
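A minimal force-stop sketch, assuming the generated client and a placeholder application name:

```java
import software.amazon.awssdk.services.kinesisanalyticsv2.KinesisAnalyticsV2Client;

public class ForceStopSketch {
    public static void main(String[] args) {
        try (KinesisAnalyticsV2Client kda = KinesisAnalyticsV2Client.create()) {
            // Force-stop: no snapshot is taken, so a later restart may lose or reprocess data.
            kda.stopApplication(b -> b
                    .applicationName("my-flink-app")  // placeholder application name
                    .force(true));
        }
    }
}
```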

" } } }, @@ -3485,6 +3541,10 @@ } } }, + "UrlType":{ + "type":"string", + "enum":["FLINK_DASHBOARD_URL"] + }, "VpcConfiguration":{ "type":"structure", "required":[ diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index c5aea2a57203..24b72d0218b4 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index d62364f4a883..5f197cc9ca95 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index da1daab8fd29..e57bf799a6f2 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 82405b7ff33c..7e9358a57b34 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 996dd00fbe25..fe1938dbffb9 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index 081be30bdfca..6dbe0528d529 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index ad5bed0c6721..20492d077f65 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/paginators-1.json b/services/lambda/src/main/resources/codegen-resources/paginators-1.json index dd31bcb2ceea..cd03e6bb38f0 100644 --- a/services/lambda/src/main/resources/codegen-resources/paginators-1.json +++ b/services/lambda/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "output_token": "NextMarker", "result_key": "Aliases" }, + "ListCodeSigningConfigs": { + "input_token": "Marker", + "limit_key": "MaxItems", + "output_token": "NextMarker", + "result_key": "CodeSigningConfigs" + }, "ListEventSourceMappings": { "input_token": "Marker", "limit_key": "MaxItems", @@ -24,6 +30,12 @@ "output_token": "NextMarker", "result_key": "Functions" }, + "ListFunctionsByCodeSigningConfig": { + "input_token": "Marker", + "limit_key": "MaxItems", + "output_token": "NextMarker", + "result_key": "FunctionArns" + }, "ListLayerVersions": { "input_token": "Marker", "limit_key": "MaxItems", diff --git 
a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index a44c27aa64b6..5774342f189a 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -68,6 +68,21 @@ ], "documentation":"

Creates an alias for a Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a different version.

You can also map an alias to split invocation requests between two versions. Use the RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.

" }, + "CreateCodeSigningConfig":{ + "name":"CreateCodeSigningConfig", + "http":{ + "method":"POST", + "requestUri":"/2020-04-22/code-signing-configs/", + "responseCode":201 + }, + "input":{"shape":"CreateCodeSigningConfigRequest"}, + "output":{"shape":"CreateCodeSigningConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Creates a code signing configuration. A code signing configuration defines a list of allowed signing profiles and the code-signing validation policy (the action to take if deployment validation checks fail).
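A hedged sketch of the new operation with the generated Lambda client; the signing profile version ARN and description are placeholders.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.CodeSigningPolicy;
import software.amazon.awssdk.services.lambda.model.CreateCodeSigningConfigResponse;

public class CreateCodeSigningConfigSketch {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.create()) {
            CreateCodeSigningConfigResponse resp = lambda.createCodeSigningConfig(b -> b
                    .description("Publishers trusted by the payments team")
                    .allowedPublishers(p -> p.signingProfileVersionArns(
                            "arn:aws:signer:us-east-1:123456789012:/signing-profiles/PaymentsProfile/abcdef012345"))  // placeholder
                    .codeSigningPolicies(p -> p.untrustedArtifactOnDeployment(CodeSigningPolicy.ENFORCE)));  // default is Warn
            System.out.println(resp.codeSigningConfig().codeSigningConfigArn());
        }
    }
}
```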

" + }, "CreateEventSourceMapping":{ "name":"CreateEventSourceMapping", "http":{ @@ -84,7 +99,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a mapping between an event source and an AWS Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age. Default -1 (infinite). Minimum 60. Maximum 604800.

  • MaximumRetryAttempts - Discard records after the specified number of retries. Default -1 (infinite). Minimum 0. Maximum 10000. When infinite, failed records will be retried until the record expires.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.

" + "documentation":"

Creates a mapping between an event source and an AWS Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

  • MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.
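The stream-specific error-handling options listed above can be combined in one call; here is a hedged sketch for a Kinesis source using the generated client, with placeholder ARNs and function name.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.CreateEventSourceMappingResponse;
import software.amazon.awssdk.services.lambda.model.EventSourcePosition;

public class StreamErrorHandlingSketch {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.create()) {
            CreateEventSourceMappingResponse mapping = lambda.createEventSourceMapping(b -> b
                    .functionName("orders-processor")                                        // placeholder
                    .eventSourceArn("arn:aws:kinesis:us-east-1:123456789012:stream/orders")  // placeholder
                    .startingPosition(EventSourcePosition.LATEST)
                    .batchSize(100)
                    .parallelizationFactor(2)          // process two batches per shard concurrently
                    .bisectBatchOnFunctionError(true)  // split and retry failed batches
                    .maximumRecordAgeInSeconds(3600)   // discard records older than an hour
                    .maximumRetryAttempts(2)           // then send record metadata to the failure destination
                    .destinationConfig(d -> d.onFailure(f -> f
                            .destination("arn:aws:sqs:us-east-1:123456789012:orders-dlq"))));  // placeholder
            System.out.println("Mapping UUID: " + mapping.uuid());
        }
    }
}
```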

" }, "CreateFunction":{ "name":"CreateFunction", @@ -101,9 +116,12 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceConflictException"}, {"shape":"TooManyRequestsException"}, - {"shape":"CodeStorageExceededException"} + {"shape":"CodeStorageExceededException"}, + {"shape":"CodeVerificationFailedException"}, + {"shape":"InvalidCodeSignatureException"}, + {"shape":"CodeSigningConfigNotFoundException"} ], - "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package contains your function code. The execution role grants the function permission to use AWS services, such as Amazon CloudWatch Logs for log streaming and AWS X-Ray for request tracing.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Function States.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

If another account or an AWS service invokes your function, use AddPermission to grant permission by creating a resource-based IAM policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other AWS services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Functions.

" + "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a ZIP archive or container image that contains your function code. The execution role grants the function permission to use AWS services, such as Amazon CloudWatch Logs for log streaming and AWS X-Ray for request tracing.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Function States.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

You can use code signing if your deployment package is a ZIP archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

If another account or an AWS service invokes your function, use AddPermission to grant permission by creating a resource-based IAM policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other AWS services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Functions.
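To illustrate the new CodeSigningConfigArn member on CreateFunction, a hedged sketch with the generated client; ARNs, bucket, key, and handler are placeholders.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.FunctionCode;
import software.amazon.awssdk.services.lambda.model.Runtime;

public class CreateSignedFunctionSketch {
    public static void main(String[] args) {
        String cscArn = "arn:aws:lambda:us-east-1:123456789012:code-signing-config:csc-0123456789abcdef0";  // placeholder
        try (LambdaClient lambda = LambdaClient.create()) {
            lambda.createFunction(b -> b
                    .functionName("orders-processor")
                    .runtime(Runtime.JAVA11)
                    .handler("com.example.OrdersHandler::handleRequest")
                    .role("arn:aws:iam::123456789012:role/orders-processor-role")  // placeholder execution role
                    .code(FunctionCode.builder()
                            .s3Bucket("example-artifacts")
                            .s3Key("orders-processor-signed.zip")  // must carry a signature from an allowed signing profile
                            .build())
                    .codeSigningConfigArn(cscArn));                // deployments are validated against this configuration
        }
    }
}
```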

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -121,6 +139,23 @@ ], "documentation":"

Deletes a Lambda function alias.

" }, + "DeleteCodeSigningConfig":{ + "name":"DeleteCodeSigningConfig", + "http":{ + "method":"DELETE", + "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteCodeSigningConfigRequest"}, + "output":{"shape":"DeleteCodeSigningConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"} + ], + "documentation":"

Deletes the code signing configuration. You can delete the code signing configuration only if no function is using it.

" + }, "DeleteEventSourceMapping":{ "name":"DeleteEventSourceMapping", "http":{ @@ -156,6 +191,24 @@ ], "documentation":"

Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted.

To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For AWS services and resources that invoke your function directly, delete the trigger in the service where you originally configured it.

" }, + "DeleteFunctionCodeSigningConfig":{ + "name":"DeleteFunctionCodeSigningConfig", + "http":{ + "method":"DELETE", + "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", + "responseCode":204 + }, + "input":{"shape":"DeleteFunctionCodeSigningConfigRequest"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"CodeSigningConfigNotFoundException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} + ], + "documentation":"

Removes the code signing configuration from the function.

" + }, "DeleteFunctionConcurrency":{ "name":"DeleteFunctionConcurrency", "http":{ @@ -252,6 +305,22 @@ ], "documentation":"

Returns details about a Lambda function alias.

" }, + "GetCodeSigningConfig":{ + "name":"GetCodeSigningConfig", + "http":{ + "method":"GET", + "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", + "responseCode":200 + }, + "input":{"shape":"GetCodeSigningConfigRequest"}, + "output":{"shape":"GetCodeSigningConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns information about the specified code signing configuration.

" + }, "GetEventSourceMapping":{ "name":"GetEventSourceMapping", "http":{ @@ -286,6 +355,23 @@ ], "documentation":"

Returns information about the function or function version, with a link to download the deployment package that's valid for 10 minutes. If you specify a function version, only details that are specific to that version are returned.

" }, + "GetFunctionCodeSigningConfig":{ + "name":"GetFunctionCodeSigningConfig", + "http":{ + "method":"GET", + "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", + "responseCode":200 + }, + "input":{"shape":"GetFunctionCodeSigningConfigRequest"}, + "output":{"shape":"GetFunctionCodeSigningConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Returns the code signing configuration for the specified function.

" + }, "GetFunctionConcurrency":{ "name":"GetFunctionConcurrency", "http":{ @@ -497,6 +583,21 @@ ], "documentation":"

Returns a list of aliases for a Lambda function.

" }, + "ListCodeSigningConfigs":{ + "name":"ListCodeSigningConfigs", + "http":{ + "method":"GET", + "requestUri":"/2020-04-22/code-signing-configs/", + "responseCode":200 + }, + "input":{"shape":"ListCodeSigningConfigsRequest"}, + "output":{"shape":"ListCodeSigningConfigsResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.

" + }, "ListEventSourceMappings":{ "name":"ListEventSourceMappings", "http":{ @@ -547,6 +648,22 @@ ], "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version. To get more information about a function or version, use GetFunction.

" }, + "ListFunctionsByCodeSigningConfig":{ + "name":"ListFunctionsByCodeSigningConfig", + "http":{ + "method":"GET", + "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}/functions", + "responseCode":200 + }, + "input":{"shape":"ListFunctionsByCodeSigningConfigRequest"}, + "output":{"shape":"ListFunctionsByCodeSigningConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the functions that use the specified code signing configuration. You can use this method prior to deleting a code signing configuration, to verify that no functions are using it.
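The check-then-delete workflow described here looks roughly like the following sketch with the generated client; the configuration ARN is a placeholder.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.ListFunctionsByCodeSigningConfigResponse;

public class DeleteUnusedCscSketch {
    public static void main(String[] args) {
        String cscArn = "arn:aws:lambda:us-east-1:123456789012:code-signing-config:csc-0123456789abcdef0";  // placeholder
        try (LambdaClient lambda = LambdaClient.create()) {
            ListFunctionsByCodeSigningConfigResponse inUse =
                    lambda.listFunctionsByCodeSigningConfig(b -> b.codeSigningConfigArn(cscArn));
            if (inUse.functionArns().isEmpty()) {
                // DeleteCodeSigningConfig only succeeds when no function references the configuration.
                lambda.deleteCodeSigningConfig(b -> b.codeSigningConfigArn(cscArn));
            } else {
                inUse.functionArns().forEach(arn -> System.out.println("Still attached to: " + arn));
            }
        }
    }
}
```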

" + }, "ListLayerVersions":{ "name":"ListLayerVersions", "http":{ @@ -668,6 +785,25 @@ ], "documentation":"

Creates a version from the current code and configuration of a function. Use versions to create a snapshot of your function code and configuration that doesn't change.

AWS Lambda doesn't publish a version if the function's configuration and code haven't changed since the last version. Use UpdateFunctionCode or UpdateFunctionConfiguration to update the function before publishing a version.

Clients can invoke versions directly or with an alias. To create an alias, use CreateAlias.

" }, + "PutFunctionCodeSigningConfig":{ + "name":"PutFunctionCodeSigningConfig", + "http":{ + "method":"PUT", + "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", + "responseCode":200 + }, + "input":{"shape":"PutFunctionCodeSigningConfigRequest"}, + "output":{"shape":"PutFunctionCodeSigningConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"}, + {"shape":"CodeSigningConfigNotFoundException"} + ], + "documentation":"

Update the code signing configuration for the function. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.

" + }, "PutFunctionConcurrency":{ "name":"PutFunctionConcurrency", "http":{ @@ -808,6 +944,22 @@ ], "documentation":"

Updates the configuration of a Lambda function alias.

" }, + "UpdateCodeSigningConfig":{ + "name":"UpdateCodeSigningConfig", + "http":{ + "method":"PUT", + "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateCodeSigningConfigRequest"}, + "output":{"shape":"UpdateCodeSigningConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.

" + }, "UpdateEventSourceMapping":{ "name":"UpdateEventSourceMapping", "http":{ @@ -825,7 +977,7 @@ {"shape":"ResourceConflictException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Updates an event source mapping. You can change the function that AWS Lambda invokes, or pause invocation and resume later from the same location.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age. Default -1 (infinite). Minimum 60. Maximum 604800.

  • MaximumRetryAttempts - Discard records after the specified number of retries. Default -1 (infinite). Minimum 0. Maximum 10000. When infinite, failed records will be retried until the record expires.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.

" + "documentation":"

Updates an event source mapping. You can change the function that AWS Lambda invokes, or pause invocation and resume later from the same location.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

  • MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.

" }, "UpdateFunctionCode":{ "name":"UpdateFunctionCode", @@ -843,9 +995,12 @@ {"shape":"TooManyRequestsException"}, {"shape":"CodeStorageExceededException"}, {"shape":"PreconditionFailedException"}, - {"shape":"ResourceConflictException"} + {"shape":"ResourceConflictException"}, + {"shape":"CodeVerificationFailedException"}, + {"shape":"InvalidCodeSignatureException"}, + {"shape":"CodeSigningConfigNotFoundException"} ], - "documentation":"

Updates a Lambda function's code.

The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.

" + "documentation":"

Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing.

The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.

" }, "UpdateFunctionConfiguration":{ "name":"UpdateFunctionConfiguration", @@ -862,7 +1017,10 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"}, {"shape":"ResourceConflictException"}, - {"shape":"PreconditionFailedException"} + {"shape":"PreconditionFailedException"}, + {"shape":"CodeVerificationFailedException"}, + {"shape":"InvalidCodeSignatureException"}, + {"shape":"CodeSigningConfigNotFoundException"} ], "documentation":"

Modify the version-specific settings of a Lambda function.

When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Function States.

These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.

To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an account or AWS service, use AddPermission.

" }, @@ -1109,6 +1267,17 @@ }, "documentation":"

The traffic-shifting configuration of a Lambda function alias.

" }, + "AllowedPublishers":{ + "type":"structure", + "required":["SigningProfileVersionArns"], + "members":{ + "SigningProfileVersionArns":{ + "shape":"SigningProfileVersionArns", + "documentation":"

The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package.

" + } + }, + "documentation":"

List of signing profiles that can sign a code package.

" + }, "Arn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" @@ -1128,6 +1297,83 @@ "streaming":true }, "Boolean":{"type":"boolean"}, + "CodeSigningConfig":{ + "type":"structure", + "required":[ + "CodeSigningConfigId", + "CodeSigningConfigArn", + "AllowedPublishers", + "CodeSigningPolicies", + "LastModified" + ], + "members":{ + "CodeSigningConfigId":{ + "shape":"CodeSigningConfigId", + "documentation":"

Unique identifier for the Code signing configuration.

" + }, + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the Code signing configuration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

Code signing configuration description.

" + }, + "AllowedPublishers":{ + "shape":"AllowedPublishers", + "documentation":"

List of allowed publishers.

" + }, + "CodeSigningPolicies":{ + "shape":"CodeSigningPolicies", + "documentation":"

The code signing policy controls the validation failure action for signature mismatch or expiry.

" + }, + "LastModified":{ + "shape":"Timestamp", + "documentation":"

The date and time that the Code signing configuration was last modified, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + } + }, + "documentation":"

Details about a Code signing configuration.

" + }, + "CodeSigningConfigArn":{ + "type":"string", + "max":200, + "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:code-signing-config:csc-[a-z0-9]{17}" + }, + "CodeSigningConfigId":{ + "type":"string", + "pattern":"csc-[a-zA-Z0-9-_\\.]{17}" + }, + "CodeSigningConfigList":{ + "type":"list", + "member":{"shape":"CodeSigningConfig"} + }, + "CodeSigningConfigNotFoundException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The specified code signing configuration does not exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "CodeSigningPolicies":{ + "type":"structure", + "members":{ + "UntrustedArtifactOnDeployment":{ + "shape":"CodeSigningPolicy", + "documentation":"

Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if signature validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log.

Default value: Warn

" + } + }, + "documentation":"

Code signing configuration policies specify the validation failure action for signature mismatch or expiry.

" + }, + "CodeSigningPolicy":{ + "type":"string", + "enum":[ + "Warn", + "Enforce" + ] + }, "CodeStorageExceededException":{ "type":"structure", "members":{ @@ -1141,10 +1387,20 @@ "error":{"httpStatusCode":400}, "exception":true }, + "CodeVerificationFailedException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The code signature failed one or more of the validation checks for signature mismatch or expiry, and the code signing policy is set to ENFORCE. Lambda blocks the deployment.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "CompatibleRuntimes":{ "type":"list", "member":{"shape":"Runtime"}, - "max":5 + "max":15 }, "Concurrency":{ "type":"structure", @@ -1187,6 +1443,34 @@ } } }, + "CreateCodeSigningConfigRequest":{ + "type":"structure", + "required":["AllowedPublishers"], + "members":{ + "Description":{ + "shape":"Description", + "documentation":"

Descriptive name for this code signing configuration.

" + }, + "AllowedPublishers":{ + "shape":"AllowedPublishers", + "documentation":"

Signing profiles for this code signing configuration.

" + }, + "CodeSigningPolicies":{ + "shape":"CodeSigningPolicies", + "documentation":"

The code signing policies define the actions to take if the validation checks fail.

" + } + } + }, + "CreateCodeSigningConfigResponse":{ + "type":"structure", + "required":["CodeSigningConfig"], + "members":{ + "CodeSigningConfig":{ + "shape":"CodeSigningConfig", + "documentation":"

The code signing configuration.

" + } + } + }, "CreateEventSourceMappingRequest":{ "type":"structure", "required":[ @@ -1245,6 +1529,14 @@ "Topics":{ "shape":"Topics", "documentation":"

(MSK) The name of the Kafka topic.

" + }, + "Queues":{ + "shape":"Queues", + "documentation":"

(MQ) The name of the Amazon MQ broker destination queue to consume.

" + }, + "SourceAccessConfigurations":{ + "shape":"SourceAccessConfigurations", + "documentation":"

(MQ) The Secrets Manager secret that stores your broker credentials. To store your secret, use the following format: { \"username\": \"your username\", \"password\": \"your password\" }

To reference the secret, use the following format: [ { \"Type\": \"BASIC_AUTH\", \"URI\": \"secretARN\" } ]

The value of Type is always BASIC_AUTH. To encrypt the secret, you can use customer or service managed keys. When using a customer managed KMS key, the Lambda execution role requires kms:Decrypt permissions.
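For the new Amazon MQ support, a hedged sketch of wiring the queue and the broker-credentials secret into an event source mapping; broker ARN, queue name, and secret ARN are placeholders, and the Type setter is assumed to accept the plain string "BASIC_AUTH".

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.SourceAccessConfiguration;

public class MqEventSourceSketch {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.create()) {
            lambda.createEventSourceMapping(b -> b
                    .functionName("orders-processor")                                                 // placeholder
                    .eventSourceArn("arn:aws:mq:us-east-1:123456789012:broker:orders-broker:b-0000")  // placeholder broker
                    .queues("ORDERS.QUEUE")
                    .sourceAccessConfigurations(SourceAccessConfiguration.builder()
                            .type("BASIC_AUTH")  // the only supported type for MQ
                            .uri("arn:aws:secretsmanager:us-east-1:123456789012:secret:mq-creds")  // {"username":...,"password":...}
                            .build())
                    .batchSize(10));
        }
    }
}
```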

" } } }, @@ -1252,9 +1544,7 @@ "type":"structure", "required":[ "FunctionName", - "Runtime", "Role", - "Handler", "Code" ], "members":{ @@ -1298,6 +1588,10 @@ "shape":"VpcConfig", "documentation":"

For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can only access resources and the internet through that VPC. For more information, see VPC Settings.

" }, + "PackageType":{ + "shape":"PackageType", + "documentation":"

The type of deployment package. Set to Image for a container image or Zip for a ZIP archive.

" + }, "DeadLetterConfig":{ "shape":"DeadLetterConfig", "documentation":"

A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead Letter Queues.

" @@ -1325,6 +1619,14 @@ "FileSystemConfigs":{ "shape":"FileSystemConfigList", "documentation":"

Connection settings for an Amazon EFS file system.

" + }, + "ImageConfig":{ + "shape":"ImageConfig", + "documentation":"

Configuration values that override the container image Dockerfile.

" + }, + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

" } } }, @@ -1360,6 +1662,23 @@ } } }, + "DeleteCodeSigningConfigRequest":{ + "type":"structure", + "required":["CodeSigningConfigArn"], + "members":{ + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", + "location":"uri", + "locationName":"CodeSigningConfigArn" + } + } + }, + "DeleteCodeSigningConfigResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEventSourceMappingRequest":{ "type":"structure", "required":["UUID"], @@ -1372,6 +1691,18 @@ } } }, + "DeleteFunctionCodeSigningConfigRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "location":"uri", + "locationName":"FunctionName" + } + } + }, "DeleteFunctionConcurrencyRequest":{ "type":"structure", "required":["FunctionName"], @@ -1629,17 +1960,25 @@ "shape":"String", "documentation":"

The identifier of the event source mapping.

" }, + "StartingPosition":{ + "shape":"EventSourcePosition", + "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.

" + }, + "StartingPositionTimestamp":{ + "shape":"Date", + "documentation":"

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading.

" + }, "BatchSize":{ "shape":"BatchSize", "documentation":"

The maximum number of items to retrieve in a single batch.

" }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

(Streams) The maximum amount of time to gather records before invoking the function, in seconds.

" + "documentation":"

(Streams) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.

" }, "ParallelizationFactor":{ "shape":"ParallelizationFactor", - "documentation":"

(Streams) The number of batches to process from each shard concurrently.

" + "documentation":"

(Streams) The number of batches to process from each shard concurrently. The default value is 1.

" }, "EventSourceArn":{ "shape":"Arn", @@ -1671,19 +2010,27 @@ }, "Topics":{ "shape":"Topics", - "documentation":"

(MSK) The name of the Kafka topic.

" + "documentation":"

(MSK) The name of the Kafka topic to consume.

" + }, + "Queues":{ + "shape":"Queues", + "documentation":"

(MQ) The name of the Amazon MQ broker destination queue to consume.

" + }, + "SourceAccessConfigurations":{ + "shape":"SourceAccessConfigurations", + "documentation":"

(MQ) The Secrets Manager secret that stores your broker credentials. To store your secret, use the following format: { \"username\": \"your username\", \"password\": \"your password\" }

To reference the secret, use the following format: [ { \"Type\": \"BASIC_AUTH\", \"URI\": \"secretARN\" } ]

The value of Type is always BASIC_AUTH. To encrypt the secret, you can use customer or service managed keys. When using a customer managed KMS key, the Lambda execution role requires kms:Decrypt permissions.

" }, "MaximumRecordAgeInSeconds":{ "shape":"MaximumRecordAgeInSeconds", - "documentation":"

(Streams) The maximum age of a record that Lambda sends to a function for processing.

" + "documentation":"

(Streams) Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" }, "BisectBatchOnFunctionError":{ "shape":"BisectBatchOnFunctionError", - "documentation":"

(Streams) If the function returns an error, split the batch in two and retry.

" + "documentation":"

(Streams) If the function returns an error, split the batch in two and retry. The default value is false.

" }, "MaximumRetryAttempts":{ "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"

(Streams) The maximum number of times to retry when the function returns an error.

" + "documentation":"

(Streams) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" } }, "documentation":"

A mapping between an AWS resource and an AWS Lambda function. See CreateEventSourceMapping for details.

" @@ -1738,6 +2085,10 @@ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" }, + "FunctionArnList":{ + "type":"list", + "member":{"shape":"FunctionArn"} + }, "FunctionCode":{ "type":"structure", "members":{ @@ -1756,9 +2107,13 @@ "S3ObjectVersion":{ "shape":"S3ObjectVersion", "documentation":"

For versioned objects, the version of the deployment package object to use.

" + }, + "ImageUri":{ + "shape":"String", + "documentation":"

URI of a container image in the Amazon ECR registry.

" } }, - "documentation":"

The code for the Lambda function. You can specify either an object in Amazon S3, or upload a deployment package directly.

" + "documentation":"

The code for the Lambda function. You can specify an object in Amazon S3, upload a ZIP archive deployment package directly, or specify the URI of a container image.

" }, "FunctionCodeLocation":{ "type":"structure", @@ -1770,6 +2125,14 @@ "Location":{ "shape":"String", "documentation":"

A presigned URL that you can use to download the deployment package.

" + }, + "ImageUri":{ + "shape":"String", + "documentation":"

URI of a container image in the Amazon ECR registry.

" + }, + "ResolvedImageUri":{ + "shape":"String", + "documentation":"

The resolved URI for the image.

" } }, "documentation":"

Details about a function's deployment package.

" @@ -1884,6 +2247,22 @@ "FileSystemConfigs":{ "shape":"FileSystemConfigList", "documentation":"

Connection settings for an Amazon EFS file system.

" + }, + "PackageType":{ + "shape":"PackageType", + "documentation":"

The type of deployment package. Set to Image for a container image or Zip for a ZIP archive.

" + }, + "ImageConfigResponse":{ + "shape":"ImageConfigResponse", + "documentation":"

The function's image configuration values.

" + }, + "SigningProfileVersionArn":{ + "shape":"Arn", + "documentation":"

The ARN of the signing profile version.

" + }, + "SigningJobArn":{ + "shape":"Arn", + "documentation":"

The ARN of the signing job.

" } }, "documentation":"

Details about a function's configuration.

" @@ -1970,6 +2349,28 @@ } } }, + "GetCodeSigningConfigRequest":{ + "type":"structure", + "required":["CodeSigningConfigArn"], + "members":{ + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", + "location":"uri", + "locationName":"CodeSigningConfigArn" + } + } + }, + "GetCodeSigningConfigResponse":{ + "type":"structure", + "required":["CodeSigningConfig"], + "members":{ + "CodeSigningConfig":{ + "shape":"CodeSigningConfig", + "documentation":"

The code signing configuration.

" + } + } + }, "GetEventSourceMappingRequest":{ "type":"structure", "required":["UUID"], @@ -1982,6 +2383,35 @@ } } }, + "GetFunctionCodeSigningConfigRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"FunctionName", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "GetFunctionCodeSigningConfigResponse":{ + "type":"structure", + "required":[ + "CodeSigningConfigArn", + "FunctionName" + ], + "members":{ + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" + }, + "FunctionName":{ + "shape":"FunctionName", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" + } + } + }, "GetFunctionConcurrencyRequest":{ "type":"structure", "required":["FunctionName"], @@ -2269,7 +2699,63 @@ "pattern":"[^\\s]+" }, "HttpStatus":{"type":"integer"}, + "ImageConfig":{ + "type":"structure", + "members":{ + "EntryPoint":{ + "shape":"StringList", + "documentation":"

Specifies the entry point to your application, which is typically the location of the runtime executable.

" + }, + "Command":{ + "shape":"StringList", + "documentation":"

Specifies parameters that you want to pass in with ENTRYPOINT.

" + }, + "WorkingDirectory":{ + "shape":"WorkingDirectory", + "documentation":"

Specifies the working directory.

" + } + }, + "documentation":"

Configuration values that override the container image Dockerfile. See Override Container settings.
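A hedged sketch of overriding the image's ENTRYPOINT, CMD, and WORKDIR through the generated client. ImageConfig is added to UpdateFunctionConfigurationRequest later in this diff; the function name and override values below are placeholders.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.ImageConfig;
import software.amazon.awssdk.services.lambda.model.UpdateFunctionConfigurationRequest;

public class OverrideImageConfig {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.create()) {
            // Override the Dockerfile's ENTRYPOINT, CMD, and WORKDIR for this function.
            ImageConfig imageConfig = ImageConfig.builder()
                    .entryPoint("/usr/local/bin/python3")   // placeholder values only
                    .command("app.handler")
                    .workingDirectory("/var/task")
                    .build();

            lambda.updateFunctionConfiguration(UpdateFunctionConfigurationRequest.builder()
                    .functionName("my-image-function")      // placeholder
                    .imageConfig(imageConfig)
                    .build());
        }
    }
}
```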

" + }, + "ImageConfigError":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"String", + "documentation":"

Error code.

" + }, + "Message":{ + "shape":"SensitiveString", + "documentation":"

Error message.

" + } + }, + "documentation":"

Error response to GetFunctionConfiguration.

" + }, + "ImageConfigResponse":{ + "type":"structure", + "members":{ + "ImageConfig":{ + "shape":"ImageConfig", + "documentation":"

Configuration values that override the container image Dockerfile.

" + }, + "Error":{ + "shape":"ImageConfigError", + "documentation":"

Error response to GetFunctionConfiguration.

" + } + }, + "documentation":"

Response to GetFunctionConfiguration request.

" + }, "Integer":{"type":"integer"}, + "InvalidCodeSignatureException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The code signature failed the integrity check. Lambda always blocks deployment if the integrity check fails, even if code signing policy is set to WARN.
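Since deployment is always blocked on a failed integrity check, callers of the code-update operations may want to handle the generated exception class. A minimal sketch, assuming the class name follows the usual shape-to-class codegen convention; bucket, key, and function name are placeholders.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.InvalidCodeSignatureException;
import software.amazon.awssdk.services.lambda.model.UpdateFunctionCodeRequest;

public class DeploySignedCode {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.create()) {
            try {
                lambda.updateFunctionCode(UpdateFunctionCodeRequest.builder()
                        .functionName("my-signed-function")   // placeholder
                        .s3Bucket("my-deploy-bucket")         // placeholder
                        .s3Key("signed/package.zip")          // placeholder
                        .build());
            } catch (InvalidCodeSignatureException e) {
                // Deployment is blocked whenever the signature fails the integrity
                // check, regardless of whether the policy is WARN or ENFORCE.
                System.err.println("Code signature invalid: " + e.getMessage());
            }
        }
    }
}
```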

", + "error":{"httpStatusCode":400}, + "exception":true + }, "InvalidParameterValueException":{ "type":"structure", "members":{ @@ -2519,7 +3005,9 @@ "InternalError", "SubnetOutOfIPAddresses", "InvalidSubnet", - "InvalidSecurityGroup" + "InvalidSecurityGroup", + "ImageDeleted", + "ImageAccessDenied" ] }, "Layer":{ @@ -2532,6 +3020,14 @@ "CodeSize":{ "shape":"Long", "documentation":"

The size of the layer archive in bytes.

" + }, + "SigningProfileVersionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for a signing profile version.

" + }, + "SigningJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of a signing job.

" } }, "documentation":"

An AWS Lambda layer.

" @@ -2602,6 +3098,14 @@ "CodeSize":{ "shape":"Long", "documentation":"

The size of the layer archive in bytes.

" + }, + "SigningProfileVersionArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for a signing profile version.

" + }, + "SigningJobArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of a signing job.

" } }, "documentation":"

Details about a version of an AWS Lambda layer.

" @@ -2714,6 +3218,36 @@ } } }, + "ListCodeSigningConfigsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "documentation":"

Maximum number of items to return.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCodeSigningConfigsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"String", + "documentation":"

The pagination token that's included if more results are available.

" + }, + "CodeSigningConfigs":{ + "shape":"CodeSigningConfigList", + "documentation":"

The code signing configurations.

" + } + } + }, "ListEventSourceMappingsRequest":{ "type":"structure", "members":{ @@ -2793,6 +3327,43 @@ } } }, + "ListFunctionsByCodeSigningConfigRequest":{ + "type":"structure", + "required":["CodeSigningConfigArn"], + "members":{ + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", + "location":"uri", + "locationName":"CodeSigningConfigArn" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "documentation":"

Maximum number of items to return.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListFunctionsByCodeSigningConfigResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"String", + "documentation":"

The pagination token that's included if more results are available.

" + }, + "FunctionArns":{ + "shape":"FunctionArnList", + "documentation":"

The function ARNs.
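The Marker/NextMarker pair implies the usual manual pagination loop. A sketch under those assumptions; the code signing configuration ARN is a placeholder.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.ListFunctionsByCodeSigningConfigRequest;
import software.amazon.awssdk.services.lambda.model.ListFunctionsByCodeSigningConfigResponse;

public class ListSignedFunctions {
    public static void main(String[] args) {
        String cscArn = "arn:aws:lambda:us-east-1:123456789012:code-signing-config:csc-0123456789abcdef0"; // placeholder
        try (LambdaClient lambda = LambdaClient.create()) {
            String marker = null;
            do {
                ListFunctionsByCodeSigningConfigResponse page = lambda.listFunctionsByCodeSigningConfig(
                        ListFunctionsByCodeSigningConfigRequest.builder()
                                .codeSigningConfigArn(cscArn)
                                .maxItems(50)
                                .marker(marker)
                                .build());
                page.functionArns().forEach(System.out::println);
                marker = page.nextMarker();   // null when there are no more pages
            } while (marker != null);
        }
    }
}
```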

" + } + } + }, "ListFunctionsRequest":{ "type":"structure", "members":{ @@ -3074,7 +3645,7 @@ }, "MemorySize":{ "type":"integer", - "max":3008, + "max":10240, "min":128 }, "NameSpacedFunctionArn":{ @@ -3121,6 +3692,13 @@ "type":"string", "pattern":"o-[a-z0-9]{10,32}" }, + "PackageType":{ + "type":"string", + "enum":[ + "Zip", + "Image" + ] + }, "ParallelizationFactor":{ "type":"integer", "max":10, @@ -3308,6 +3886,42 @@ } } }, + "PutFunctionCodeSigningConfigRequest":{ + "type":"structure", + "required":[ + "CodeSigningConfigArn", + "FunctionName" + ], + "members":{ + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" + }, + "FunctionName":{ + "shape":"FunctionName", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", + "location":"uri", + "locationName":"FunctionName" + } + } + }, + "PutFunctionCodeSigningConfigResponse":{ + "type":"structure", + "required":[ + "CodeSigningConfigArn", + "FunctionName" + ], + "members":{ + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" + }, + "FunctionName":{ + "shape":"FunctionName", + "documentation":"

The name of the Lambda function.

Name formats

  • Function name - MyFunction.

  • Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.

  • Partial ARN - 123456789012:function:MyFunction.

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" + } + } + }, "PutFunctionConcurrencyRequest":{ "type":"structure", "required":[ @@ -3418,6 +4032,18 @@ "min":1, "pattern":"(|[a-zA-Z0-9$_-]+)" }, + "Queue":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"[\\s\\S]*" + }, + "Queues":{ + "type":"list", + "member":{"shape":"Queue"}, + "max":1, + "min":1 + }, "RemoveLayerVersionPermissionRequest":{ "type":"structure", "required":[ @@ -3623,6 +4249,36 @@ "error":{"httpStatusCode":500}, "exception":true }, + "SigningProfileVersionArns":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":20, + "min":1 + }, + "SourceAccessConfiguration":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"SourceAccessType", + "documentation":"

To reference the secret, use the following format: [ { \"Type\": \"BASIC_AUTH\", \"URI\": \"secretARN\" } ]

The value of Type is always BASIC_AUTH. To encrypt the secret, you can use customer or service managed keys. When using a customer managed KMS key, the Lambda execution role requires kms:Decrypt permissions.

" + }, + "URI":{ + "shape":"Arn", + "documentation":"

To reference the secret, use the following format: [ { \"Type\": \"BASIC_AUTH\", \"URI\": \"secretARN\" } ]

The value of Type is always BASIC_AUTH. To encrypt the secret, you can use customer or service managed keys. When using a customer managed KMS key, the Lambda execution role requires kms:Decrypt permissions.

" + } + }, + "documentation":"

(MQ) The Secrets Manager secret that stores your broker credentials. To store your secret, use the following format: { \"username\": \"your username\", \"password\": \"your password\" }
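A hedged sketch of attaching broker credentials to an event source mapping, using the SourceAccessConfigurations member added to UpdateEventSourceMappingRequest later in this diff. Setter names are assumed to follow the standard codegen convention (uuid, uri); the mapping ID and secret ARN are placeholders.

```java
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.SourceAccessConfiguration;
import software.amazon.awssdk.services.lambda.model.SourceAccessType;
import software.amazon.awssdk.services.lambda.model.UpdateEventSourceMappingRequest;

public class SetBrokerCredentials {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.create()) {
            lambda.updateEventSourceMapping(UpdateEventSourceMappingRequest.builder()
                    .uuid("14e0db71-xxxx-xxxx-xxxx-example")   // placeholder mapping ID
                    .sourceAccessConfigurations(SourceAccessConfiguration.builder()
                            .type(SourceAccessType.BASIC_AUTH)
                            .uri("arn:aws:secretsmanager:us-east-1:123456789012:secret:mq-broker-creds") // placeholder
                            .build())
                    .build());
        }
    }
}
```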

" + }, + "SourceAccessConfigurations":{ + "type":"list", + "member":{"shape":"SourceAccessConfiguration"}, + "max":1, + "min":1 + }, + "SourceAccessType":{ + "type":"string", + "enum":["BASIC_AUTH"] + }, "SourceOwner":{ "type":"string", "pattern":"\\d{12}" @@ -3649,7 +4305,9 @@ "InternalError", "SubnetOutOfIPAddresses", "InvalidSubnet", - "InvalidSecurityGroup" + "InvalidSecurityGroup", + "ImageDeleted", + "ImageAccessDenied" ] }, "StatementId":{ @@ -3659,6 +4317,11 @@ "pattern":"([a-zA-Z0-9-_]+)" }, "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"}, + "max":1500 + }, "SubnetIPAddressLimitReachedException":{ "type":"structure", "members":{ @@ -3848,6 +4511,40 @@ } } }, + "UpdateCodeSigningConfigRequest":{ + "type":"structure", + "required":["CodeSigningConfigArn"], + "members":{ + "CodeSigningConfigArn":{ + "shape":"CodeSigningConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", + "location":"uri", + "locationName":"CodeSigningConfigArn" + }, + "Description":{ + "shape":"Description", + "documentation":"

Descriptive name for this code signing configuration.

" + }, + "AllowedPublishers":{ + "shape":"AllowedPublishers", + "documentation":"

Signing profiles for this code signing configuration.

" + }, + "CodeSigningPolicies":{ + "shape":"CodeSigningPolicies", + "documentation":"

The code signing policy.

" + } + } + }, + "UpdateCodeSigningConfigResponse":{ + "type":"structure", + "required":["CodeSigningConfig"], + "members":{ + "CodeSigningConfig":{ + "shape":"CodeSigningConfig", + "documentation":"

The code signing configuration.

" + } + } + }, "UpdateEventSourceMappingRequest":{ "type":"structure", "required":["UUID"], @@ -3893,6 +4590,10 @@ "ParallelizationFactor":{ "shape":"ParallelizationFactor", "documentation":"

(Streams) The number of batches to process from each shard concurrently.

" + }, + "SourceAccessConfigurations":{ + "shape":"SourceAccessConfigurations", + "documentation":"

(MQ) The Secrets Manager secret that stores your broker credentials. To store your secret, use the following format: { \"username\": \"your username\", \"password\": \"your password\" }

To reference the secret, use the following format: [ { \"Type\": \"BASIC_AUTH\", \"URI\": \"secretARN\" } ]

The value of Type is always BASIC_AUTH. To encrypt the secret, you can use customer or service managed keys. When using a customer managed KMS key, the Lambda execution role requires kms:Decrypt permissions.

" } } }, @@ -3922,6 +4623,10 @@ "shape":"S3ObjectVersion", "documentation":"

For versioned objects, the version of the deployment package object to use.

" }, + "ImageUri":{ + "shape":"String", + "documentation":"

URI of a container image in the Amazon ECR registry.

" + }, "Publish":{ "shape":"Boolean", "documentation":"

Set to true to publish a new version of the function after updating the code. This has the same effect as calling PublishVersion separately.

" @@ -4001,6 +4706,10 @@ "FileSystemConfigs":{ "shape":"FileSystemConfigList", "documentation":"

Connection settings for an Amazon EFS file system.

" + }, + "ImageConfig":{ + "shape":"ImageConfig", + "documentation":"

Configuration values that override the container image Dockerfile.

" } } }, @@ -4077,6 +4786,10 @@ "type":"double", "max":1.0, "min":0.0 + }, + "WorkingDirectory":{ + "type":"string", + "max":1000 } }, "documentation":"AWS Lambda

Overview

This is the AWS Lambda API Reference. The AWS Lambda Developer Guide provides additional information. For the service overview, see What is AWS Lambda, and for information about how the service works, see AWS Lambda: How it Works in the AWS Lambda Developer Guide.

" diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 2763f9b4eb75..76f01ebb3426 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json b/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json index d3071c5fef32..042f844cf1b1 100644 --- a/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json @@ -979,6 +979,16 @@ "CustomPayload" ] }, + "ContextTimeToLiveInSeconds":{ + "type":"integer", + "max":86400, + "min":5 + }, + "ContextTurnsToLive":{ + "type":"integer", + "max":20, + "min":1 + }, "ConversationLogsRequest":{ "type":"structure", "required":[ @@ -1183,6 +1193,14 @@ "kendraConfiguration":{ "shape":"KendraConfiguration", "documentation":"

Configuration information, if any, for connecting an Amazon Kendra index with the AMAZON.KendraSearchIntent intent.

" + }, + "inputContexts":{ + "shape":"InputContextList", + "documentation":"

An array of InputContext objects that lists the contexts that must be active for Amazon Lex to choose the intent in a conversation with the user.

" + }, + "outputContexts":{ + "shape":"OutputContextList", + "documentation":"

An array of OutputContext objects that lists the contexts that the intent activates when the intent is fulfilled.

" } } }, @@ -2216,6 +2234,14 @@ "kendraConfiguration":{ "shape":"KendraConfiguration", "documentation":"

Configuration information, if any, to connect to an Amazon Kendra index with the AMAZON.KendraSearchIntent intent.

" + }, + "inputContexts":{ + "shape":"InputContextList", + "documentation":"

An array of InputContext objects that lists the contexts that must be active for Amazon Lex to choose the intent in a conversation with the user.

" + }, + "outputContexts":{ + "shape":"OutputContextList", + "documentation":"

An array of OutputContext objects that lists the contexts that the intent activates when the intent is fulfilled.

" } } }, @@ -2492,6 +2518,29 @@ "FAILED" ] }, + "InputContext":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"InputContextName", + "documentation":"

The name of the context.

" + } + }, + "documentation":"

The name of a context that must be active for an intent to be selected by Amazon Lex.

" + }, + "InputContextList":{ + "type":"list", + "member":{"shape":"InputContext"}, + "max":5, + "min":0 + }, + "InputContextName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^([A-Za-z]_?)+$" + }, "Intent":{ "type":"structure", "required":[ @@ -2656,7 +2705,12 @@ "en-AU", "en-GB", "en-US", - "es-US" + "es-419", + "es-ES", + "es-US", + "fr-FR", + "fr-CA", + "it-IT" ] }, "LocaleList":{ @@ -2806,6 +2860,41 @@ "DEFAULT_OBFUSCATION" ] }, + "OutputContext":{ + "type":"structure", + "required":[ + "name", + "timeToLiveInSeconds", + "turnsToLive" + ], + "members":{ + "name":{ + "shape":"OutputContextName", + "documentation":"

The name of the context.

" + }, + "timeToLiveInSeconds":{ + "shape":"ContextTimeToLiveInSeconds", + "documentation":"

The number of seconds that the context should be active after it is first sent in a PostContent or PostText response. You can set the value between 5 and 86,400 seconds (24 hours).

" + }, + "turnsToLive":{ + "shape":"ContextTurnsToLive", + "documentation":"

The number of conversation turns that the context should be active. A conversation turn is one PostContent or PostText request and the corresponding response from Amazon Lex.

" + } + }, + "documentation":"

The specification of an output context that is set when an intent is fulfilled.
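A minimal sketch of how an intent might declare input and output contexts through the generated Lex Model Building client. The intent and context names are placeholders, and the TTL values are illustrative only.

```java
import software.amazon.awssdk.services.lexmodelbuilding.LexModelBuildingClient;
import software.amazon.awssdk.services.lexmodelbuilding.model.InputContext;
import software.amazon.awssdk.services.lexmodelbuilding.model.OutputContext;
import software.amazon.awssdk.services.lexmodelbuilding.model.PutIntentRequest;

public class PutIntentWithContexts {
    public static void main(String[] args) {
        try (LexModelBuildingClient lex = LexModelBuildingClient.create()) {
            lex.putIntent(PutIntentRequest.builder()
                    .name("OrderPizza")                       // placeholder intent name
                    .outputContexts(OutputContext.builder()
                            .name("order_in_progress")
                            .timeToLiveInSeconds(600)         // active for 10 minutes...
                            .turnsToLive(5)                   // ...or 5 conversation turns
                            .build())
                    .inputContexts(InputContext.builder()
                            .name("greeting_done")            // intent eligible only while this context is active
                            .build())
                    .build());
        }
    }
}
```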

" + }, + "OutputContextList":{ + "type":"list", + "member":{"shape":"OutputContext"}, + "max":10, + "min":0 + }, + "OutputContextName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^([A-Za-z]_?)+$" + }, "PreconditionFailedException":{ "type":"structure", "members":{ @@ -2961,11 +3050,11 @@ }, "enableModelImprovements":{ "shape":"Boolean", - "documentation":"

Set to true to enable access to natural language understanding improvements.

When you set the enableModelImprovements parameter to true you can use the nluIntentConfidenceThreshold parameter to configure confidence scores. For more information, see Confidence Scores.

You can only set the enableModelImprovements parameter in certain Regions. If you set the parameter to true, your bot has access to accuracy improvements.

The Regions where you can set the enableModelImprovements parameter to true are:

  • US East (N. Virginia) (us-east-1)

  • US West (Oregon) (us-west-2)

  • Asia Pacific (Sydney) (ap-southeast-2)

  • EU (Ireland) (eu-west-1)

In other Regions, the enableModelImprovements parameter is set to true by default. In these Regions setting the parameter to false throws a ValidationException exception.

  • Asia Pacific (Singapore) (ap-southeast-1)

  • Asia Pacific (Tokyo) (ap-northeast-1)

  • EU (Frankfurt) (eu-central-1)

  • EU (London) (eu-west-2)

" + "documentation":"

Set to true to enable access to natural language understanding improvements.

When you set the enableModelImprovements parameter to true you can use the nluIntentConfidenceThreshold parameter to configure confidence scores. For more information, see Confidence Scores.

You can only set the enableModelImprovements parameter in certain Regions. If you set the parameter to true, your bot has access to accuracy improvements.

The Regions where you can set the enableModelImprovements parameter to true are:

  • US East (N. Virginia) (us-east-1)

  • US West (Oregon) (us-west-2)

  • Asia Pacific (Sydney) (ap-southeast-2)

  • EU (Ireland) (eu-west-1)

In other Regions, the enableModelImprovements parameter is set to true by default. In these Regions setting the parameter to false throws a ValidationException exception.

" }, "nluIntentConfidenceThreshold":{ "shape":"ConfidenceThreshold", - "documentation":"

Determines the threshold where Amazon Lex will insert the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both when returning alternative intents in a PostContent or PostText response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent are only inserted if they are configured for the bot.

You must set the enableModelImprovements parameter to true to use confidence scores.

  • US East (N. Virginia) (us-east-1)

  • US West (Oregon) (us-west-2)

  • Asia Pacific (Sydney) (ap-southeast-2)

  • EU (Ireland) (eu-west-1)

In other Regions, the enableModelImprovements parameter is set to true by default.

For example, suppose a bot is configured with the confidence threshold of 0.80 and the AMAZON.FallbackIntent. Amazon Lex returns three alternative intents with the following confidence scores: IntentA (0.70), IntentB (0.60), IntentC (0.50). The response from the PostText operation would be:

  • AMAZON.FallbackIntent

  • IntentA

  • IntentB

  • IntentC

" + "documentation":"

Determines the threshold where Amazon Lex will insert the AMAZON.FallbackIntent, AMAZON.KendraSearchIntent, or both when returning alternative intents in a PostContent or PostText response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent are only inserted if they are configured for the bot.

You must set the enableModelImprovements parameter to true to use confidence scores in the following Regions.

  • US East (N. Virginia) (us-east-1)

  • US West (Oregon) (us-west-2)

  • Asia Pacific (Sydney) (ap-southeast-2)

  • EU (Ireland) (eu-west-1)

In other Regions, the enableModelImprovements parameter is set to true by default.

For example, suppose a bot is configured with the confidence threshold of 0.80 and the AMAZON.FallbackIntent. Amazon Lex returns three alternative intents with the following confidence scores: IntentA (0.70), IntentB (0.60), IntentC (0.50). The response from the PostText operation would be:

  • AMAZON.FallbackIntent

  • IntentA

  • IntentB

  • IntentC

" }, "clarificationPrompt":{ "shape":"Prompt", @@ -3159,6 +3248,14 @@ "kendraConfiguration":{ "shape":"KendraConfiguration", "documentation":"

Configuration information required to use the AMAZON.KendraSearchIntent intent to connect to an Amazon Kendra index. For more information, see AMAZON.KendraSearchIntent.

" + }, + "inputContexts":{ + "shape":"InputContextList", + "documentation":"

An array of InputContext objects that lists the contexts that must be active for Amazon Lex to choose the intent in a conversation with the user.

" + }, + "outputContexts":{ + "shape":"OutputContextList", + "documentation":"

An array of OutputContext objects that lists the contexts that the intent activates when the intent is fulfilled.

" } } }, @@ -3232,6 +3329,14 @@ "kendraConfiguration":{ "shape":"KendraConfiguration", "documentation":"

Configuration information, if any, required to connect to an Amazon Kendra index and use the AMAZON.KendraSearchIntent intent.

" + }, + "inputContexts":{ + "shape":"InputContextList", + "documentation":"

An array of InputContext objects that lists the contexts that must be active for Amazon Lex to choose the intent in a conversation with the user.

" + }, + "outputContexts":{ + "shape":"OutputContextList", + "documentation":"

An array of OutputContext objects that lists the contexts that the intent activates when the intent is fulfilled.

" } } }, @@ -3440,6 +3545,10 @@ "obfuscationSetting":{ "shape":"ObfuscationSetting", "documentation":"

Determines whether a slot is obfuscated in conversation logs and stored utterances. When you obfuscate a slot, the value is replaced by the slot name in curly braces ({}). For example, if the slot name is \"full_name\", obfuscated values are replaced with \"{full_name}\". For more information, see Slot Obfuscation .

" + }, + "defaultValueSpec":{ + "shape":"SlotDefaultValueSpec", + "documentation":"

A list of default values for the slot. Default values are used when Amazon Lex hasn't determined a value for a slot. You can specify default values from context variables, session attributes, and defined values.

" } }, "documentation":"

Identifies the version of a specific slot.

" @@ -3451,6 +3560,39 @@ "Optional" ] }, + "SlotDefaultValue":{ + "type":"structure", + "required":["defaultValue"], + "members":{ + "defaultValue":{ + "shape":"SlotDefaultValueString", + "documentation":"

The default value for the slot. You can specify one of the following:

  • #context-name.slot-name - The slot value \"slot-name\" in the context \"context-name.\"

  • {attribute} - The slot value of the session attribute \"attribute.\"

  • 'value' - The discrete value \"value.\"

" + } + }, + "documentation":"

A default value for a slot.

" + }, + "SlotDefaultValueList":{ + "type":"list", + "member":{"shape":"SlotDefaultValue"}, + "max":10, + "min":0 + }, + "SlotDefaultValueSpec":{ + "type":"structure", + "required":["defaultValueList"], + "members":{ + "defaultValueList":{ + "shape":"SlotDefaultValueList", + "documentation":"

The default values for a slot. You can specify more than one default. For example, you can specify a default value to use from a matching context variable, a session attribute, or a fixed value.

Amazon Lex selects the default value based on the order in which you specify the values in the list. For example, if you specify a context variable and a fixed value in that order, Amazon Lex uses the context variable if it is available; otherwise, it uses the fixed value.

" + } + }, + "documentation":"

Contains the default values for a slot. Default values are used when Amazon Lex hasn't determined a value for a slot.
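A hedged sketch of a slot that falls back from a context variable to a fixed value, using the defaultValueSpec member added to Slot earlier in this diff. The slot name, context name, and built-in slot type are placeholder assumptions.

```java
import software.amazon.awssdk.services.lexmodelbuilding.model.Slot;
import software.amazon.awssdk.services.lexmodelbuilding.model.SlotConstraint;
import software.amazon.awssdk.services.lexmodelbuilding.model.SlotDefaultValue;
import software.amazon.awssdk.services.lexmodelbuilding.model.SlotDefaultValueSpec;

public class DefaultSlotValue {
    // Build a slot whose value comes from the order_in_progress context if present,
    // and otherwise from the fixed value 'Seattle'. Order in the list decides priority.
    static Slot citySlot() {
        return Slot.builder()
                .name("City")                                  // placeholder slot name
                .slotType("AMAZON.US_CITY")                    // assumption: built-in slot type
                .slotConstraint(SlotConstraint.OPTIONAL)
                .defaultValueSpec(SlotDefaultValueSpec.builder()
                        .defaultValueList(
                                SlotDefaultValue.builder().defaultValue("#order_in_progress.City").build(),
                                SlotDefaultValue.builder().defaultValue("'Seattle'").build())
                        .build())
                .build();
    }
}
```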

" + }, + "SlotDefaultValueString":{ + "type":"string", + "max":202, + "min":1 + }, "SlotList":{ "type":"list", "member":{"shape":"Slot"}, diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index 90c63061d89c..82e34a8f34a3 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntime/src/main/resources/codegen-resources/service-2.json b/services/lexruntime/src/main/resources/codegen-resources/service-2.json index b4d3e5c5b1b9..f3df91d83d92 100644 --- a/services/lexruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/lexruntime/src/main/resources/codegen-resources/service-2.json @@ -112,6 +112,77 @@ }, "shapes":{ "Accept":{"type":"string"}, + "ActiveContext":{ + "type":"structure", + "required":[ + "name", + "timeToLive", + "parameters" + ], + "members":{ + "name":{ + "shape":"ActiveContextName", + "documentation":"

The name of the context.

" + }, + "timeToLive":{ + "shape":"ActiveContextTimeToLive", + "documentation":"

The length of time or number of turns that a context remains active.

" + }, + "parameters":{ + "shape":"ActiveContextParametersMap", + "documentation":"

State variables for the current context. You can use these values as default values for slots in subsequent events.

" + } + }, + "documentation":"

A context is a variable that contains information about the current state of the conversation between a user and Amazon Lex. Context can be set automatically by Amazon Lex when an intent is fulfilled, or it can be set at runtime using the PostContent, PostText, or PutSession operation.

" + }, + "ActiveContextName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^([A-Za-z]_?)+$" + }, + "ActiveContextParametersMap":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"Text"}, + "max":10, + "min":0 + }, + "ActiveContextTimeToLive":{ + "type":"structure", + "members":{ + "timeToLiveInSeconds":{ + "shape":"ActiveContextTimeToLiveInSeconds", + "documentation":"

The number of seconds that the context should be active after it is first sent in a PostContent or PostText response. You can set the value between 5 and 86,400 seconds (24 hours).

" + }, + "turnsToLive":{ + "shape":"ActiveContextTurnsToLive", + "documentation":"

The number of conversation turns that the context should be active. A conversation turn is one PostContent or PostText request and the corresponding response from Amazon Lex.

" + } + }, + "documentation":"

The length of time or number of turns that a context remains active.

" + }, + "ActiveContextTimeToLiveInSeconds":{ + "type":"integer", + "max":86400, + "min":5 + }, + "ActiveContextTurnsToLive":{ + "type":"integer", + "max":20, + "min":1 + }, + "ActiveContextsList":{ + "type":"list", + "member":{"shape":"ActiveContext"}, + "max":20, + "min":0, + "sensitive":true + }, + "ActiveContextsString":{ + "type":"string", + "sensitive":true + }, "AttributesString":{ "type":"string", "sensitive":true @@ -397,6 +468,10 @@ "dialogAction":{ "shape":"DialogAction", "documentation":"

Describes the current state of the bot.

" + }, + "activeContexts":{ + "shape":"ActiveContextsList", + "documentation":"

A list of active contexts for the session. A context can be set when an intent is fulfilled or by calling the PostContent, PostText, or PutSession operation.

You can use a context to control the intents that can follow up an intent, or to modify the operation of your application.

" } } }, @@ -524,6 +599,11 @@ "error":{"httpStatusCode":404}, "exception":true }, + "ParameterName":{ + "type":"string", + "max":100, + "min":1 + }, "PostContentRequest":{ "type":"structure", "required":[ @@ -581,6 +661,13 @@ "inputStream":{ "shape":"BlobStream", "documentation":"

User input in PCM or Opus audio format or text format as described in the Content-Type HTTP header.

You can stream audio data to Amazon Lex or you can create a local buffer that captures all of the audio data before sending. In general, you get better performance if you stream audio data rather than buffering the data locally.

" + }, + "activeContexts":{ + "shape":"ActiveContextsString", + "documentation":"

A list of contexts active for the request. A context can be activated when a previous intent is fulfilled, or by including the context in the request.

If you don't specify a list of contexts, Amazon Lex will use the current list of contexts for the session. If you specify an empty list, all contexts for the session are cleared.

", + "jsonvalue":true, + "location":"header", + "locationName":"x-amz-lex-active-contexts" } }, "payload":"inputStream" @@ -602,7 +689,7 @@ }, "nluIntentConfidence":{ "shape":"String", - "documentation":"

Provides a score that indicates how confident Amazon Lex is that the returned intent is the one that matches the user's intent. The score is between 0.0 and 1.0.

The score is a relative score, not an absolute score. The score may change based on improvements to the Amazon Lex NLU.

", + "documentation":"

Provides a score that indicates how confident Amazon Lex is that the returned intent is the one that matches the user's intent. The score is between 0.0 and 1.0.

The score is a relative score, not an absolute score. The score may change based on improvements to Amazon Lex.

", "jsonvalue":true, "location":"header", "locationName":"x-amz-lex-nlu-intent-confidence" @@ -670,7 +757,7 @@ }, "botVersion":{ "shape":"BotVersion", - "documentation":"

The version of the bot that responded to the conversation. You can use this information to help determine if one version of a bot is performing better than another version.

If you have enabled the new natural language understanding (NLU) model, you can use this to determine if the improvement is due to changes to the bot or changes to the NLU.

For more information about enabling the new NLU, see the enableModelImprovements parameter of the PutBot operation.

", + "documentation":"

The version of the bot that responded to the conversation. You can use this information to help determine if one version of a bot is performing better than another version.

", "location":"header", "locationName":"x-amz-lex-bot-version" }, @@ -679,6 +766,13 @@ "documentation":"

The unique identifier for the session.

", "location":"header", "locationName":"x-amz-lex-session-id" + }, + "activeContexts":{ + "shape":"ActiveContextsString", + "documentation":"

A list of active contexts for the session. A context can be set when an intent is fulfilled or by calling the PostContent, PostText, or PutSession operation.

You can use a context to control the intents that can follow up an intent, or to modify the operation of your application.

", + "jsonvalue":true, + "location":"header", + "locationName":"x-amz-lex-active-contexts" } }, "payload":"audioStream" @@ -721,6 +815,10 @@ "inputText":{ "shape":"Text", "documentation":"

The text that the user entered (Amazon Lex interprets this text).

" + }, + "activeContexts":{ + "shape":"ActiveContextsList", + "documentation":"

A list of contexts active for the request. A context can be activated when a previous intent is fulfilled, or by including the context in the request.

If you don't specify a list of contexts, Amazon Lex will use the current list of contexts for the session. If you specify an empty list, all contexts for the session are cleared.
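A hedged sketch of passing an active context on PostText with the generated runtime client. The client class and setter names are assumed from the lexruntime module's codegen; bot, alias, user ID, and context values are placeholders.

```java
import java.util.Map;
import software.amazon.awssdk.services.lexruntime.LexRuntimeClient;
import software.amazon.awssdk.services.lexruntime.model.ActiveContext;
import software.amazon.awssdk.services.lexruntime.model.ActiveContextTimeToLive;
import software.amazon.awssdk.services.lexruntime.model.PostTextRequest;
import software.amazon.awssdk.services.lexruntime.model.PostTextResponse;

public class PostTextWithContext {
    public static void main(String[] args) {
        try (LexRuntimeClient lex = LexRuntimeClient.create()) {
            PostTextResponse response = lex.postText(PostTextRequest.builder()
                    .botName("OrderBot").botAlias("prod").userId("user-1234")   // placeholders
                    .inputText("I'd like a large pizza")
                    .activeContexts(ActiveContext.builder()
                            .name("order_in_progress")
                            .timeToLive(ActiveContextTimeToLive.builder()
                                    .timeToLiveInSeconds(120)
                                    .turnsToLive(3)
                                    .build())
                            .parameters(Map.of("size", "large"))   // carried into later turns
                            .build())
                    .build());
            // Contexts returned here can be echoed back on the next request.
            response.activeContexts().forEach(ctx -> System.out.println(ctx.name()));
        }
    }
}
```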

" } } }, @@ -733,7 +831,7 @@ }, "nluIntentConfidence":{ "shape":"IntentConfidence", - "documentation":"

Provides a score that indicates how confident Amazon Lex is that the returned intent is the one that matches the user's intent. The score is between 0.0 and 1.0. For more information, see Confidence Scores.

The score is a relative score, not an absolute score. The score may change based on improvements to the Amazon Lex natural language understanding (NLU) model.

" + "documentation":"

Provides a score that indicates how confident Amazon Lex is that the returned intent is the one that matches the user's intent. The score is between 0.0 and 1.0. For more information, see Confidence Scores.

The score is a relative score, not an absolute score. The score may change based on improvements to Amazon Lex.

" }, "alternativeIntents":{ "shape":"IntentList", @@ -777,7 +875,11 @@ }, "botVersion":{ "shape":"BotVersion", - "documentation":"

The version of the bot that responded to the conversation. You can use this information to help determine if one version of a bot is performing better than another version.

If you have enabled the new natural language understanding (NLU) model, you can use this to determine if the improvement is due to changes to the bot or changes to the NLU.

For more information about enabling the new NLU, see the enableModelImprovements parameter of the PutBot operation.

" + "documentation":"

The version of the bot that responded to the conversation. You can use this information to help determine if one version of a bot is performing better than another version.

" + }, + "activeContexts":{ + "shape":"ActiveContextsList", + "documentation":"

A list of active contexts for the session. A context can be set when an intent is fulfilled or by calling the PostContent, PostText, or PutSession operation.

You can use a context to control the intents that can follow up an intent, or to modify the operation of your application.

" } } }, @@ -842,6 +944,10 @@ "documentation":"

The message that Amazon Lex returns in the response can be either text or speech based depending on the value of this field.

  • If the value is text/plain; charset=utf-8, Amazon Lex returns text in the response.

  • If the value begins with audio/, Amazon Lex returns speech in the response. Amazon Lex uses Amazon Polly to generate the speech in the configuration that you specify. For example, if you specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG format.

  • If the value is audio/pcm, the speech is returned as audio/pcm in 16-bit, little endian format.

  • The following are the accepted values:

    • audio/mpeg

    • audio/ogg

    • audio/pcm

    • audio/* (defaults to mpeg)

    • text/plain; charset=utf-8

", "location":"header", "locationName":"Accept" + }, + "activeContexts":{ + "shape":"ActiveContextsList", + "documentation":"

A list of contexts active for the request. A context can be activated when a previous intent is fulfilled, or by including the context in the request.

If you don't specify a list of contexts, Amazon Lex will use the current list of contexts for the session. If you specify an empty list, all contexts for the session are cleared.

" } } }, @@ -907,6 +1013,13 @@ "documentation":"

A unique identifier for the session.

", "location":"header", "locationName":"x-amz-lex-session-id" + }, + "activeContexts":{ + "shape":"ActiveContextsString", + "documentation":"

A list of active contexts for the session.

", + "jsonvalue":true, + "location":"header", + "locationName":"x-amz-lex-active-contexts" } }, "payload":"audioStream" diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index f8b0a27dd6d4..7831c0ecfd28 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanager/src/main/resources/codegen-resources/service-2.json b/services/licensemanager/src/main/resources/codegen-resources/service-2.json index 832d2c75cc83..a42c5195a2d1 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/service-2.json +++ b/services/licensemanager/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,144 @@ "uid":"license-manager-2018-08-01" }, "operations":{ + "AcceptGrant":{ + "name":"AcceptGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptGrantRequest"}, + "output":{"shape":"AcceptGrantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Accepts the specified grant.
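A minimal sketch of accepting a shared grant with the generated License Manager client, using only the AcceptGrantRequest and AcceptGrantResponse shapes defined later in this file; the grant ARN is a placeholder.

```java
import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
import software.amazon.awssdk.services.licensemanager.model.AcceptGrantRequest;
import software.amazon.awssdk.services.licensemanager.model.AcceptGrantResponse;

public class AcceptSharedGrant {
    public static void main(String[] args) {
        String grantArn = "arn:aws:license-manager::123456789012:grant:g-0123456789abcdef"; // placeholder
        try (LicenseManagerClient lm = LicenseManagerClient.create()) {
            AcceptGrantResponse response = lm.acceptGrant(
                    AcceptGrantRequest.builder().grantArn(grantArn).build());
            // Status and version come straight from the AcceptGrantResponse shape.
            System.out.println(response.status() + " v" + response.version());
        }
    }
}
```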

" + }, + "CheckInLicense":{ + "name":"CheckInLicense", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckInLicenseRequest"}, + "output":{"shape":"CheckInLicenseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Checks in the specified license. Check in a license when it is no longer in use.

" + }, + "CheckoutBorrowLicense":{ + "name":"CheckoutBorrowLicense", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckoutBorrowLicenseRequest"}, + "output":{"shape":"CheckoutBorrowLicenseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NoEntitlementsAllowedException"}, + {"shape":"EntitlementNotAllowedException"}, + {"shape":"UnsupportedDigitalSignatureMethodException"}, + {"shape":"RedirectException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Checks out the specified license for offline use.

" + }, + "CheckoutLicense":{ + "name":"CheckoutLicense", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CheckoutLicenseRequest"}, + "output":{"shape":"CheckoutLicenseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NoEntitlementsAllowedException"}, + {"shape":"UnsupportedDigitalSignatureMethodException"}, + {"shape":"RedirectException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Checks out the specified license.

" + }, + "CreateGrant":{ + "name":"CreateGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGrantRequest"}, + "output":{"shape":"CreateGrantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a grant for the specified license. A grant shares the use of license entitlements with specific AWS accounts.

" + }, + "CreateGrantVersion":{ + "name":"CreateGrantVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGrantVersionRequest"}, + "output":{"shape":"CreateGrantVersionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Creates a new version of the specified grant.

" + }, + "CreateLicense":{ + "name":"CreateLicense", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLicenseRequest"}, + "output":{"shape":"CreateLicenseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"RedirectException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Creates a license.

" + }, "CreateLicenseConfiguration":{ "name":"CreateLicenseConfiguration", "http":{ @@ -30,6 +168,85 @@ ], "documentation":"

Creates a license configuration.

A license configuration is an abstraction of a customer license agreement that can be consumed and enforced by License Manager. Components include specifications for the license type (licensing by instance, socket, CPU, or vCPU), allowed tenancy (shared tenancy, Dedicated Instance, Dedicated Host, or all of these), license affinity to host (how long a license must be associated with a host), and the number of licenses purchased and used.

" }, + "CreateLicenseVersion":{ + "name":"CreateLicenseVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLicenseVersionRequest"}, + "output":{"shape":"CreateLicenseVersionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RedirectException"}, + {"shape":"ConflictException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Creates a new version of the specified license.

" + }, + "CreateToken":{ + "name":"CreateToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTokenRequest"}, + "output":{"shape":"CreateTokenResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RedirectException"} + ], + "documentation":"

Creates a long-lived token.

A refresh token is a JSON Web Token (JWT) used to get an access token. With an access token, you can call AssumeRoleWithWebIdentity to get role credentials that you can use to call License Manager to manage the specified license.

" + }, + "DeleteGrant":{ + "name":"DeleteGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGrantRequest"}, + "output":{"shape":"DeleteGrantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Deletes the specified grant.

" + }, + "DeleteLicense":{ + "name":"DeleteLicense", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteLicenseRequest"}, + "output":{"shape":"DeleteLicenseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"RedirectException"}, + {"shape":"ConflictException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Deletes the specified license.

" + }, "DeleteLicenseConfiguration":{ "name":"DeleteLicenseConfiguration", "http":{ @@ -47,6 +264,98 @@ ], "documentation":"

Deletes the specified license configuration.

You cannot delete a license configuration that is in use.

" }, + "DeleteToken":{ + "name":"DeleteToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTokenRequest"}, + "output":{"shape":"DeleteTokenResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RedirectException"} + ], + "documentation":"

Deletes the specified token. Must be called in the license home Region.

" + }, + "ExtendLicenseConsumption":{ + "name":"ExtendLicenseConsumption", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExtendLicenseConsumptionRequest"}, + "output":{"shape":"ExtendLicenseConsumptionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Extends the expiration date for license consumption.

" + }, + "GetAccessToken":{ + "name":"GetAccessToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetAccessTokenRequest"}, + "output":{"shape":"GetAccessTokenResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Gets a temporary access token to use with AssumeRoleWithWebIdentity. Access tokens are valid for one hour.

" + }, + "GetGrant":{ + "name":"GetGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetGrantRequest"}, + "output":{"shape":"GetGrantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Gets detailed information about the specified grant.

" + }, + "GetLicense":{ + "name":"GetLicense", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLicenseRequest"}, + "output":{"shape":"GetLicenseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Gets detailed information about the specified license.

" + }, "GetLicenseConfiguration":{ "name":"GetLicenseConfiguration", "http":{ @@ -64,6 +373,24 @@ ], "documentation":"

Gets detailed information about the specified license configuration.

" }, + "GetLicenseUsage":{ + "name":"GetLicenseUsage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLicenseUsageRequest"}, + "output":{"shape":"GetLicenseUsageResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Gets detailed information about the usage of the specified license.

" + }, "GetServiceSettings":{ "name":"GetServiceSettings", "http":{ @@ -98,6 +425,25 @@ ], "documentation":"

Lists the resource associations for the specified license configuration.

Resource associations need not consume licenses from a license configuration. For example, an AMI or a stopped instance might not consume a license (depending on the license rules).

" }, + "ListDistributedGrants":{ + "name":"ListDistributedGrants", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDistributedGrantsRequest"}, + "output":{"shape":"ListDistributedGrantsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Lists the grants distributed for the specified license.

" + }, "ListFailuresForLicenseConfigurationOperations":{ "name":"ListFailuresForLicenseConfigurationOperations", "http":{ @@ -150,102 +496,106 @@ ], "documentation":"

Describes the license configurations for the specified resource.

" }, - "ListResourceInventory":{ - "name":"ListResourceInventory", + "ListLicenseVersions":{ + "name":"ListLicenseVersions", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"ListResourceInventoryRequest"}, - "output":{"shape":"ListResourceInventoryResponse"}, + "input":{"shape":"ListLicenseVersionsRequest"}, + "output":{"shape":"ListLicenseVersionsResponse"}, "errors":[ {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalException"}, - {"shape":"FilterLimitExceededException"}, - {"shape":"FailedDependencyException"}, {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, - {"shape":"RateLimitExceededException"} + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} ], - "documentation":"

Lists resources managed using Systems Manager inventory.

" + "documentation":"

Lists all versions of the specified license.

" }, - "ListTagsForResource":{ - "name":"ListTagsForResource", + "ListLicenses":{ + "name":"ListLicenses", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"ListTagsForResourceRequest"}, - "output":{"shape":"ListTagsForResourceResponse"}, + "input":{"shape":"ListLicensesRequest"}, + "output":{"shape":"ListLicensesResponse"}, "errors":[ + {"shape":"ValidationException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalException"}, {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, - {"shape":"RateLimitExceededException"} + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} ], - "documentation":"

Lists the tags for the specified license configuration.

" + "documentation":"

Lists the licenses for your account.

" }, - "ListUsageForLicenseConfiguration":{ - "name":"ListUsageForLicenseConfiguration", + "ListReceivedGrants":{ + "name":"ListReceivedGrants", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"ListUsageForLicenseConfigurationRequest"}, - "output":{"shape":"ListUsageForLicenseConfigurationResponse"}, + "input":{"shape":"ListReceivedGrantsRequest"}, + "output":{"shape":"ListReceivedGrantsResponse"}, "errors":[ + {"shape":"ValidationException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"FilterLimitExceededException"}, + {"shape":"ResourceLimitExceededException"}, {"shape":"ServerInternalException"}, {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"} ], - "documentation":"

Lists all license usage records for a license configuration, displaying license consumption details by resource at a selected point in time. Use this action to audit the current license consumption for any license inventory and configuration.

" + "documentation":"

Lists grants that are received but not accepted.

" }, - "TagResource":{ - "name":"TagResource", + "ListReceivedLicenses":{ + "name":"ListReceivedLicenses", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"TagResourceRequest"}, - "output":{"shape":"TagResourceResponse"}, + "input":{"shape":"ListReceivedLicensesRequest"}, + "output":{"shape":"ListReceivedLicensesResponse"}, "errors":[ + {"shape":"ValidationException"}, {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceLimitExceededException"}, {"shape":"ServerInternalException"}, {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"} ], - "documentation":"

Adds the specified tags to the specified license configuration.

" + "documentation":"

Lists received licenses.

" }, - "UntagResource":{ - "name":"UntagResource", + "ListResourceInventory":{ + "name":"ListResourceInventory", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"UntagResourceRequest"}, - "output":{"shape":"UntagResourceResponse"}, + "input":{"shape":"ListResourceInventoryRequest"}, + "output":{"shape":"ListResourceInventoryResponse"}, "errors":[ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalException"}, + {"shape":"FilterLimitExceededException"}, + {"shape":"FailedDependencyException"}, {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"} ], - "documentation":"

Removes the specified tags from the specified license configuration.

" + "documentation":"

Lists resources managed using Systems Manager inventory.

" }, - "UpdateLicenseConfiguration":{ - "name":"UpdateLicenseConfiguration", + "ListTagsForResource":{ + "name":"ListTagsForResource", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"UpdateLicenseConfigurationRequest"}, - "output":{"shape":"UpdateLicenseConfigurationResponse"}, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalException"}, @@ -253,29 +603,135 @@ {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"} ], - "documentation":"

Modifies the attributes of an existing license configuration.

" + "documentation":"

Lists the tags for the specified license configuration.

" }, - "UpdateLicenseSpecificationsForResource":{ - "name":"UpdateLicenseSpecificationsForResource", + "ListTokens":{ + "name":"ListTokens", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"UpdateLicenseSpecificationsForResourceRequest"}, - "output":{"shape":"UpdateLicenseSpecificationsForResourceResponse"}, + "input":{"shape":"ListTokensRequest"}, + "output":{"shape":"ListTokensResponse"}, "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"InvalidResourceStateException"}, - {"shape":"LicenseUsageException"}, - {"shape":"ServerInternalException"}, + {"shape":"ValidationException"}, {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, - {"shape":"RateLimitExceededException"} + {"shape":"RateLimitExceededException"}, + {"shape":"ServerInternalException"} ], - "documentation":"

Adds or removes the specified license configurations for the specified AWS resource.

You can update the license specifications of AMIs, instances, and hosts. You cannot update the license specifications for launch templates and AWS CloudFormation templates, as they send license configurations to the operation that creates the resource.

" + "documentation":"

Lists your tokens.

" }, - "UpdateServiceSettings":{ - "name":"UpdateServiceSettings", + "ListUsageForLicenseConfiguration":{ + "name":"ListUsageForLicenseConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListUsageForLicenseConfigurationRequest"}, + "output":{"shape":"ListUsageForLicenseConfigurationResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"FilterLimitExceededException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Lists all license usage records for a license configuration, displaying license consumption details by resource at a selected point in time. Use this action to audit the current license consumption for any license inventory and configuration.

" + }, + "RejectGrant":{ + "name":"RejectGrant", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectGrantRequest"}, + "output":{"shape":"RejectGrantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServerInternalException"} + ], + "documentation":"

Rejects the specified grant.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Adds the specified tags to the specified license configuration.
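A minimal AWS SDK for Java v2 sketch of tagging a license configuration and reading the tags back (member names are assumed from the existing TagResource and ListTagsForResource shapes, which are unchanged by this diff; the tag key and value are illustrative):

    import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
    import software.amazon.awssdk.services.licensemanager.model.ListTagsForResourceRequest;
    import software.amazon.awssdk.services.licensemanager.model.Tag;
    import software.amazon.awssdk.services.licensemanager.model.TagResourceRequest;

    class LicenseConfigurationTaggingExample {
        static void tagAndList(LicenseManagerClient client, String licenseConfigurationArn) {
            client.tagResource(TagResourceRequest.builder()
                    .resourceArn(licenseConfigurationArn)
                    .tags(Tag.builder().key("team").value("platform").build())
                    .build());
            client.listTagsForResource(ListTagsForResourceRequest.builder()
                    .resourceArn(licenseConfigurationArn)
                    .build())
                    .tags()
                    .forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
        }
    }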

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Removes the specified tags from the specified license configuration.

" + }, + "UpdateLicenseConfiguration":{ + "name":"UpdateLicenseConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLicenseConfigurationRequest"}, + "output":{"shape":"UpdateLicenseConfigurationResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"}, + {"shape":"ResourceLimitExceededException"} + ], + "documentation":"

Modifies the attributes of an existing license configuration.

" + }, + "UpdateLicenseSpecificationsForResource":{ + "name":"UpdateLicenseSpecificationsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLicenseSpecificationsForResourceRequest"}, + "output":{"shape":"UpdateLicenseSpecificationsForResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"LicenseUsageException"}, + {"shape":"ServerInternalException"}, + {"shape":"AuthorizationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RateLimitExceededException"} + ], + "documentation":"

Adds or removes the specified license configurations for the specified AWS resource.

You can update the license specifications of AMIs, instances, and hosts. You cannot update the license specifications for launch templates and AWS CloudFormation templates, as they send license configurations to the operation that creates the resource.
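For example, attaching a license configuration to a running instance might look like the following AWS SDK for Java v2 sketch (the addLicenseSpecifications member name is assumed from the existing request shape; both ARNs are supplied by the caller):

    import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
    import software.amazon.awssdk.services.licensemanager.model.LicenseSpecification;
    import software.amazon.awssdk.services.licensemanager.model.UpdateLicenseSpecificationsForResourceRequest;

    class LicenseAttachmentExample {
        static void attach(LicenseManagerClient client, String instanceArn, String licenseConfigurationArn) {
            // Adds one license configuration to the instance; removeLicenseSpecifications(...) is the inverse.
            client.updateLicenseSpecificationsForResource(UpdateLicenseSpecificationsForResourceRequest.builder()
                    .resourceArn(instanceArn)
                    .addLicenseSpecifications(LicenseSpecification.builder()
                            .licenseConfigurationArn(licenseConfigurationArn)
                            .build())
                    .build());
        }
    }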

" + }, + "UpdateServiceSettings":{ + "name":"UpdateServiceSettings", "http":{ "method":"POST", "requestUri":"/" @@ -293,6 +749,33 @@ } }, "shapes":{ + "AcceptGrantRequest":{ + "type":"structure", + "required":["GrantArn"], + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the grant.

" + } + } + }, + "AcceptGrantResponse":{ + "type":"structure", + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Grant ARN.

" + }, + "Status":{ + "shape":"GrantStatus", + "documentation":"

Grant status.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Grant version.

" + } + } + }, "AccessDeniedException":{ "type":"structure", "members":{ @@ -301,6 +784,33 @@ "documentation":"

Access to resource denied.

", "exception":true }, + "AllowedOperation":{ + "type":"string", + "enum":[ + "CreateGrant", + "CheckoutLicense", + "CheckoutBorrowLicense", + "CheckInLicense", + "ExtendConsumptionLicense", + "ListPurchasedLicenses", + "CreateToken" + ] + }, + "AllowedOperationList":{ + "type":"list", + "member":{"shape":"AllowedOperation"}, + "max":7, + "min":1 + }, + "Arn":{ + "type":"string", + "max":2048, + "pattern":"^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "ArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, "AuthorizationException":{ "type":"structure", "members":{ @@ -312,105 +822,925 @@ "AutomatedDiscoveryInformation":{ "type":"structure", "members":{ - "LastRunTime":{ - "shape":"DateTime", - "documentation":"

Time that automated discovery last ran.

" + "LastRunTime":{ + "shape":"DateTime", + "documentation":"

Time that automated discovery last ran.

" + } + }, + "documentation":"

Describes automated discovery.

" + }, + "Boolean":{"type":"boolean"}, + "BorrowConfiguration":{ + "type":"structure", + "required":[ + "AllowEarlyCheckIn", + "MaxTimeToLiveInMinutes" + ], + "members":{ + "AllowEarlyCheckIn":{ + "shape":"BoxBoolean", + "documentation":"

Indicates whether early check-ins are allowed.

" + }, + "MaxTimeToLiveInMinutes":{ + "shape":"BoxInteger", + "documentation":"

Maximum time for the borrow configuration, in minutes.

" + } + }, + "documentation":"

Details about a borrow configuration.

" + }, + "BoxBoolean":{"type":"boolean"}, + "BoxInteger":{"type":"integer"}, + "BoxLong":{"type":"long"}, + "CheckInLicenseRequest":{ + "type":"structure", + "required":["LicenseConsumptionToken"], + "members":{ + "LicenseConsumptionToken":{ + "shape":"String", + "documentation":"

License consumption token.

" + }, + "Beneficiary":{ + "shape":"String", + "documentation":"

License beneficiary.

" + } + } + }, + "CheckInLicenseResponse":{ + "type":"structure", + "members":{ + } + }, + "CheckoutBorrowLicenseRequest":{ + "type":"structure", + "required":[ + "LicenseArn", + "Entitlements", + "DigitalSignatureMethod", + "ClientToken" + ], + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license. The license must use the borrow consumption configuration.

" + }, + "Entitlements":{ + "shape":"EntitlementDataList", + "documentation":"

License entitlements. Partial checkouts are not supported.

" + }, + "DigitalSignatureMethod":{ + "shape":"DigitalSignatureMethod", + "documentation":"

Digital signature method. The possible value is JSON Web Signature (JWS) algorithm PS384. For more information, see RFC 7518 Digital Signature with RSASSA-PSS.

" + }, + "NodeId":{ + "shape":"String", + "documentation":"

Node ID.

" + }, + "CheckoutMetadata":{ + "shape":"MetadataList", + "documentation":"

Information about constraints.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
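Taken together, the members above translate into a call like this AWS SDK for Java v2 sketch (standard codegen naming assumed; the entitlement name is hypothetical):

    import java.util.UUID;
    import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
    import software.amazon.awssdk.services.licensemanager.model.CheckoutBorrowLicenseRequest;
    import software.amazon.awssdk.services.licensemanager.model.CheckoutBorrowLicenseResponse;
    import software.amazon.awssdk.services.licensemanager.model.DigitalSignatureMethod;
    import software.amazon.awssdk.services.licensemanager.model.EntitlementData;
    import software.amazon.awssdk.services.licensemanager.model.EntitlementDataUnit;

    class CheckoutBorrowExample {
        static CheckoutBorrowLicenseResponse checkout(LicenseManagerClient client, String licenseArn) {
            // The license must use a borrow consumption configuration; partial checkouts are not supported.
            CheckoutBorrowLicenseResponse response = client.checkoutBorrowLicense(CheckoutBorrowLicenseRequest.builder()
                    .licenseArn(licenseArn)
                    .entitlements(EntitlementData.builder()
                            .name("SeatCount")                        // hypothetical entitlement name
                            .value("1")
                            .unit(EntitlementDataUnit.COUNT)
                            .build())
                    .digitalSignatureMethod(DigitalSignatureMethod.JWT_PS384)
                    .clientToken(UUID.randomUUID().toString().replace("-", ""))
                    .build());
            // The response carries the signed token plus IssuedAt/Expiration timestamps for offline validation.
            return response;
        }
    }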

" + } + } + }, + "CheckoutBorrowLicenseResponse":{ + "type":"structure", + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "LicenseConsumptionToken":{ + "shape":"String", + "documentation":"

License consumption token.

" + }, + "EntitlementsAllowed":{ + "shape":"EntitlementDataList", + "documentation":"

Allowed license entitlements.

" + }, + "NodeId":{ + "shape":"String", + "documentation":"

Node ID.

" + }, + "SignedToken":{ + "shape":"SignedToken", + "documentation":"

Signed token.

" + }, + "IssuedAt":{ + "shape":"ISO8601DateTime", + "documentation":"

Date and time at which the license checkout is issued.

" + }, + "Expiration":{ + "shape":"ISO8601DateTime", + "documentation":"

Date and time at which the license checkout expires.

" + }, + "CheckoutMetadata":{ + "shape":"MetadataList", + "documentation":"

Information about constraints.

" + } + } + }, + "CheckoutLicenseRequest":{ + "type":"structure", + "required":[ + "ProductSKU", + "CheckoutType", + "KeyFingerprint", + "Entitlements", + "ClientToken" + ], + "members":{ + "ProductSKU":{ + "shape":"String", + "documentation":"

Product SKU.

" + }, + "CheckoutType":{ + "shape":"CheckoutType", + "documentation":"

Checkout type.

" + }, + "KeyFingerprint":{ + "shape":"String", + "documentation":"

Key fingerprint identifying the license.

" + }, + "Entitlements":{ + "shape":"EntitlementDataList", + "documentation":"

License entitlements.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + }, + "Beneficiary":{ + "shape":"String", + "documentation":"

License beneficiary.

" + }, + "NodeId":{ + "shape":"String", + "documentation":"

Node ID.

" + } + } + }, + "CheckoutLicenseResponse":{ + "type":"structure", + "members":{ + "CheckoutType":{ + "shape":"CheckoutType", + "documentation":"

Checkout type.

" + }, + "LicenseConsumptionToken":{ + "shape":"String", + "documentation":"

License consumption token.

" + }, + "EntitlementsAllowed":{ + "shape":"EntitlementDataList", + "documentation":"

Allowed license entitlements.

" + }, + "SignedToken":{ + "shape":"SignedToken", + "documentation":"

Signed token.

" + }, + "NodeId":{ + "shape":"String", + "documentation":"

Node ID.

" + }, + "IssuedAt":{ + "shape":"ISO8601DateTime", + "documentation":"

Date and time at which the license checkout is issued.

" + }, + "Expiration":{ + "shape":"ISO8601DateTime", + "documentation":"

Date and time at which the license checkout expires.

" + } + } + }, + "CheckoutType":{ + "type":"string", + "enum":["PROVISIONAL"] + }, + "ClientToken":{ + "type":"string", + "max":64, + "pattern":"[a-zA-Z0-9]*" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

There was a conflict processing the request. Try your request again.

", + "exception":true + }, + "ConsumedLicenseSummary":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

Resource type of the resource consuming a license.

" + }, + "ConsumedLicenses":{ + "shape":"BoxLong", + "documentation":"

Number of licenses consumed by the resource.

" + } + }, + "documentation":"

Details about license consumption.

" + }, + "ConsumedLicenseSummaryList":{ + "type":"list", + "member":{"shape":"ConsumedLicenseSummary"} + }, + "ConsumptionConfiguration":{ + "type":"structure", + "members":{ + "RenewType":{ + "shape":"RenewType", + "documentation":"

Renewal frequency.

" + }, + "ProvisionalConfiguration":{ + "shape":"ProvisionalConfiguration", + "documentation":"

Details about a provisional configuration.

" + }, + "BorrowConfiguration":{ + "shape":"BorrowConfiguration", + "documentation":"

Details about a borrow configuration.

" + } + }, + "documentation":"

Details about a consumption configuration.

" + }, + "CreateGrantRequest":{ + "type":"structure", + "required":[ + "ClientToken", + "GrantName", + "LicenseArn", + "Principals", + "HomeRegion", + "AllowedOperations" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + }, + "GrantName":{ + "shape":"String", + "documentation":"

Grant name.

" + }, + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "Principals":{ + "shape":"PrincipalArnList", + "documentation":"

The grant principals.

" + }, + "HomeRegion":{ + "shape":"String", + "documentation":"

Home Region of the grant.

" + }, + "AllowedOperations":{ + "shape":"AllowedOperationList", + "documentation":"

Allowed operations for the grant.

" + } + } + }, + "CreateGrantResponse":{ + "type":"structure", + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Grant ARN.

" + }, + "Status":{ + "shape":"GrantStatus", + "documentation":"

Grant status.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Grant version.

" + } + } + }, + "CreateGrantVersionRequest":{ + "type":"structure", + "required":[ + "ClientToken", + "GrantArn" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + }, + "GrantArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the grant.

" + }, + "GrantName":{ + "shape":"String", + "documentation":"

Grant name.

" + }, + "AllowedOperations":{ + "shape":"AllowedOperationList", + "documentation":"

Allowed operations for the grant.

" + }, + "Status":{ + "shape":"GrantStatus", + "documentation":"

Grant status.

" + }, + "SourceVersion":{ + "shape":"String", + "documentation":"

Current version of the grant.

" + } + } + }, + "CreateGrantVersionResponse":{ + "type":"structure", + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Grant ARN.

" + }, + "Status":{ + "shape":"GrantStatus", + "documentation":"

Grant status.

" + }, + "Version":{ + "shape":"String", + "documentation":"

New version of the grant.

" + } + } + }, + "CreateLicenseConfigurationRequest":{ + "type":"structure", + "required":[ + "Name", + "LicenseCountingType" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Name of the license configuration.

" + }, + "Description":{ + "shape":"String", + "documentation":"

Description of the license configuration.

" + }, + "LicenseCountingType":{ + "shape":"LicenseCountingType", + "documentation":"

Dimension used to track the license inventory.

" + }, + "LicenseCount":{ + "shape":"BoxLong", + "documentation":"

Number of licenses managed by the license configuration.

" + }, + "LicenseCountHardLimit":{ + "shape":"BoxBoolean", + "documentation":"

Indicates whether hard or soft license enforcement is used. Exceeding a hard limit blocks the launch of new instances.

" + }, + "LicenseRules":{ + "shape":"StringList", + "documentation":"

License rules. The syntax is #name=value (for example, #allowedTenancy=EC2-DedicatedHost). The available rules vary by dimension, as follows.

  • Cores dimension: allowedTenancy | licenseAffinityToHost | maximumCores | minimumCores

  • Instances dimension: allowedTenancy | maximumCores | minimumCores | maximumSockets | minimumSockets | maximumVcpus | minimumVcpus

  • Sockets dimension: allowedTenancy | licenseAffinityToHost | maximumSockets | minimumSockets

  • vCPUs dimension: allowedTenancy | honorVcpuOptimization | maximumVcpus | minimumVcpus

The unit for licenseAffinityToHost is days and the range is 1 to 180. The possible values for allowedTenancy are EC2-Default, EC2-DedicatedHost, and EC2-DedicatedInstance. The possible values for honorVcpuOptimization are True and False.
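A short AWS SDK for Java v2 sketch of a core-counting configuration that uses this rule syntax (codegen naming assumed; the configuration name and rule values are illustrative):

    import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
    import software.amazon.awssdk.services.licensemanager.model.CreateLicenseConfigurationRequest;
    import software.amazon.awssdk.services.licensemanager.model.LicenseCountingType;

    class CoreLicenseConfigurationExample {
        static String create(LicenseManagerClient client) {
            return client.createLicenseConfiguration(CreateLicenseConfigurationRequest.builder()
                    .name("example-core-licenses")                      // illustrative name
                    .licenseCountingType(LicenseCountingType.CORE)      // Cores dimension
                    .licenseCount(96L)
                    .licenseCountHardLimit(true)                        // block launches once the count is reached
                    .licenseRules("#allowedTenancy=EC2-DedicatedHost",  // rules valid for the Cores dimension
                                  "#licenseAffinityToHost=30")
                    .build())
                    .licenseConfigurationArn();
        }
    }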

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags to add to the license configuration.

" + }, + "ProductInformationList":{ + "shape":"ProductInformationList", + "documentation":"

Product information.

" + } + } + }, + "CreateLicenseConfigurationResponse":{ + "type":"structure", + "members":{ + "LicenseConfigurationArn":{ + "shape":"String", + "documentation":"

Amazon Resource Name (ARN) of the license configuration.

" + } + } + }, + "CreateLicenseRequest":{ + "type":"structure", + "required":[ + "LicenseName", + "ProductName", + "ProductSKU", + "Issuer", + "HomeRegion", + "Validity", + "Entitlements", + "Beneficiary", + "ConsumptionConfiguration", + "ClientToken" + ], + "members":{ + "LicenseName":{ + "shape":"String", + "documentation":"

License name.

" + }, + "ProductName":{ + "shape":"String", + "documentation":"

Product name.

" + }, + "ProductSKU":{ + "shape":"String", + "documentation":"

Product SKU.

" + }, + "Issuer":{ + "shape":"Issuer", + "documentation":"

License issuer.

" + }, + "HomeRegion":{ + "shape":"String", + "documentation":"

Home Region for the license.

" + }, + "Validity":{ + "shape":"DatetimeRange", + "documentation":"

Date and time range during which the license is valid, in ISO8601-UTC format.

" + }, + "Entitlements":{ + "shape":"EntitlementList", + "documentation":"

License entitlements.

" + }, + "Beneficiary":{ + "shape":"String", + "documentation":"

License beneficiary.

" + }, + "ConsumptionConfiguration":{ + "shape":"ConsumptionConfiguration", + "documentation":"

Configuration for consumption of the license. Choose a provisional configuration for workloads running with continuous connectivity. Choose a borrow configuration for workloads with offline usage.
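The two variants might be built like this (AWS SDK for Java v2 sketch, standard codegen naming assumed; the renewal and TTL values are illustrative):

    import software.amazon.awssdk.services.licensemanager.model.BorrowConfiguration;
    import software.amazon.awssdk.services.licensemanager.model.ConsumptionConfiguration;
    import software.amazon.awssdk.services.licensemanager.model.ProvisionalConfiguration;
    import software.amazon.awssdk.services.licensemanager.model.RenewType;

    class ConsumptionConfigurationExamples {
        // Connected workloads: short-lived provisional checkouts that are renewed while online.
        static ConsumptionConfiguration provisional() {
            return ConsumptionConfiguration.builder()
                    .renewType(RenewType.WEEKLY)
                    .provisionalConfiguration(ProvisionalConfiguration.builder()
                            .maxTimeToLiveInMinutes(60)
                            .build())
                    .build();
        }

        // Offline workloads: borrowed checkouts that remain valid without connectivity.
        static ConsumptionConfiguration borrow() {
            return ConsumptionConfiguration.builder()
                    .renewType(RenewType.NONE)
                    .borrowConfiguration(BorrowConfiguration.builder()
                            .allowEarlyCheckIn(true)
                            .maxTimeToLiveInMinutes(24 * 60)
                            .build())
                    .build();
        }
    }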

" + }, + "LicenseMetadata":{ + "shape":"MetadataList", + "documentation":"

Information about the license.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + } + } + }, + "CreateLicenseResponse":{ + "type":"structure", + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "Status":{ + "shape":"LicenseStatus", + "documentation":"

License status.

" + }, + "Version":{ + "shape":"String", + "documentation":"

License version.

" + } + } + }, + "CreateLicenseVersionRequest":{ + "type":"structure", + "required":[ + "LicenseArn", + "LicenseName", + "ProductName", + "Issuer", + "HomeRegion", + "Validity", + "Entitlements", + "ConsumptionConfiguration", + "Status", + "ClientToken" + ], + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "LicenseName":{ + "shape":"String", + "documentation":"

License name.

" + }, + "ProductName":{ + "shape":"String", + "documentation":"

Product name.

" + }, + "Issuer":{ + "shape":"Issuer", + "documentation":"

License issuer.

" + }, + "HomeRegion":{ + "shape":"String", + "documentation":"

Home Region of the license.

" + }, + "Validity":{ + "shape":"DatetimeRange", + "documentation":"

Date and time range during which the license is valid, in ISO8601-UTC format.

" + }, + "LicenseMetadata":{ + "shape":"MetadataList", + "documentation":"

Information about the license.

" + }, + "Entitlements":{ + "shape":"EntitlementList", + "documentation":"

License entitlements.

" + }, + "ConsumptionConfiguration":{ + "shape":"ConsumptionConfiguration", + "documentation":"

Configuration for consumption of the license. Choose a provisional configuration for workloads running with continuous connectivity. Choose a borrow configuration for workloads with offline usage.

" + }, + "Status":{ + "shape":"LicenseStatus", + "documentation":"

License status.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + }, + "SourceVersion":{ + "shape":"String", + "documentation":"

Current version of the license.

" + } + } + }, + "CreateLicenseVersionResponse":{ + "type":"structure", + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

License ARN.

" + }, + "Version":{ + "shape":"String", + "documentation":"

New version of the license.

" + }, + "Status":{ + "shape":"LicenseStatus", + "documentation":"

License status.

" + } + } + }, + "CreateTokenRequest":{ + "type":"structure", + "required":[ + "LicenseArn", + "ClientToken" + ], + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license. The ARN is mapped to the aud claim of the JWT token.

" + }, + "RoleArns":{ + "shape":"ArnList", + "documentation":"

Amazon Resource Names (ARNs) of the IAM roles to embed in the token. License Manager does not check whether the roles are in use.


" + }, + "ExpirationInDays":{ + "shape":"Integer", + "documentation":"

Token expiration, in days, counted from token creation. The default is 365 days.

" + }, + "TokenProperties":{ + "shape":"MaxSize3StringList", + "documentation":"

Data specified by the caller to be included in the JWT token. The data is mapped to the amr claim of the JWT token.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Idempotency token, valid for 10 minutes.
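A sketch of the token flow with the AWS SDK for Java v2, assuming standard codegen naming: CreateToken issues the refresh token, and GetAccessToken exchanges it for a temporary access token:

    import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
    import software.amazon.awssdk.services.licensemanager.model.CreateTokenRequest;
    import software.amazon.awssdk.services.licensemanager.model.CreateTokenResponse;
    import software.amazon.awssdk.services.licensemanager.model.GetAccessTokenRequest;

    class TokenExample {
        static String refreshThenAccess(LicenseManagerClient client, String licenseArn, String clientToken) {
            // CreateToken returns a long-lived refresh token (JWT) bound to the license ARN (the aud claim).
            CreateTokenResponse created = client.createToken(CreateTokenRequest.builder()
                    .licenseArn(licenseArn)
                    .expirationInDays(90)
                    .clientToken(clientToken)    // caller-supplied idempotency token
                    .build());
            // Exchange the refresh token for a temporary access token.
            return client.getAccessToken(GetAccessTokenRequest.builder()
                    .token(created.token())
                    .build())
                    .accessToken();
        }
    }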

" + } + } + }, + "CreateTokenResponse":{ + "type":"structure", + "members":{ + "TokenId":{ + "shape":"String", + "documentation":"

Token ID.

" + }, + "TokenType":{ + "shape":"TokenType", + "documentation":"

Token type.

" + }, + "Token":{ + "shape":"TokenString", + "documentation":"

Refresh token, encoded as a JWT token.

" + } + } + }, + "DateTime":{"type":"timestamp"}, + "DatetimeRange":{ + "type":"structure", + "required":["Begin"], + "members":{ + "Begin":{ + "shape":"ISO8601DateTime", + "documentation":"

Start of the time range.

" + }, + "End":{ + "shape":"ISO8601DateTime", + "documentation":"

End of the time range.

" + } + }, + "documentation":"

Describes a time range, in ISO8601-UTC format.

" + }, + "DeleteGrantRequest":{ + "type":"structure", + "required":[ + "GrantArn", + "Version" + ], + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the grant.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Current version of the grant.

" + } + } + }, + "DeleteGrantResponse":{ + "type":"structure", + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Grant ARN.

" + }, + "Status":{ + "shape":"GrantStatus", + "documentation":"

Grant status.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Grant version.

" + } + } + }, + "DeleteLicenseConfigurationRequest":{ + "type":"structure", + "required":["LicenseConfigurationArn"], + "members":{ + "LicenseConfigurationArn":{ + "shape":"String", + "documentation":"

ID of the license configuration.

" + } + } + }, + "DeleteLicenseConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteLicenseRequest":{ + "type":"structure", + "required":[ + "LicenseArn", + "SourceVersion" + ], + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "SourceVersion":{ + "shape":"String", + "documentation":"

Current version of the license.

" + } + } + }, + "DeleteLicenseResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"LicenseDeletionStatus", + "documentation":"

License status.

" + }, + "DeletionDate":{ + "shape":"ISO8601DateTime", + "documentation":"

Date on which the license is deleted.

" + } + } + }, + "DeleteTokenRequest":{ + "type":"structure", + "required":["TokenId"], + "members":{ + "TokenId":{ + "shape":"String", + "documentation":"

Token ID.

" + } + } + }, + "DeleteTokenResponse":{ + "type":"structure", + "members":{ + } + }, + "DigitalSignatureMethod":{ + "type":"string", + "enum":["JWT_PS384"] + }, + "Entitlement":{ + "type":"structure", + "required":[ + "Name", + "Unit" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Entitlement name.

" + }, + "Value":{ + "shape":"String", + "documentation":"

Entitlement resource. Use only if the unit is None.

" + }, + "MaxCount":{ + "shape":"Long", + "documentation":"

Maximum entitlement count. Use if the unit is not None.

" + }, + "Overage":{ + "shape":"BoxBoolean", + "documentation":"

Indicates whether overages are allowed.

" + }, + "Unit":{ + "shape":"EntitlementUnit", + "documentation":"

Entitlement unit.

" + }, + "AllowCheckIn":{ + "shape":"BoxBoolean", + "documentation":"

Indicates whether check-ins are allowed.

" } }, - "documentation":"

Describes automated discovery.

" + "documentation":"

Describes a resource entitled for use with a license.

" }, - "Boolean":{"type":"boolean"}, - "BoxBoolean":{"type":"boolean"}, - "BoxInteger":{"type":"integer"}, - "BoxLong":{"type":"long"}, - "ConsumedLicenseSummary":{ + "EntitlementData":{ "type":"structure", + "required":[ + "Name", + "Unit" + ], "members":{ - "ResourceType":{ - "shape":"ResourceType", - "documentation":"

Resource type of the resource consuming a license.

" + "Name":{ + "shape":"String", + "documentation":"

Entitlement data name.

" }, - "ConsumedLicenses":{ - "shape":"BoxLong", - "documentation":"

Number of licenses consumed by the resource.

" + "Value":{ + "shape":"String", + "documentation":"

Entitlement data value.

" + }, + "Unit":{ + "shape":"EntitlementDataUnit", + "documentation":"

Entitlement data unit.

" } }, - "documentation":"

Details about license consumption.

" + "documentation":"

Data associated with an entitlement resource.

" }, - "ConsumedLicenseSummaryList":{ + "EntitlementDataList":{ "type":"list", - "member":{"shape":"ConsumedLicenseSummary"} + "member":{"shape":"EntitlementData"} }, - "CreateLicenseConfigurationRequest":{ + "EntitlementDataUnit":{ + "type":"string", + "enum":[ + "Count", + "None", + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second" + ] + }, + "EntitlementList":{ + "type":"list", + "member":{"shape":"Entitlement"} + }, + "EntitlementNotAllowedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The entitlement is not allowed.

", + "exception":true + }, + "EntitlementUnit":{ + "type":"string", + "enum":[ + "Count", + "None", + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second" + ] + }, + "EntitlementUsage":{ "type":"structure", "required":[ "Name", - "LicenseCountingType" + "ConsumedValue", + "Unit" ], "members":{ "Name":{ "shape":"String", - "documentation":"

Name of the license configuration.

" + "documentation":"

Entitlement usage name.

" }, - "Description":{ + "ConsumedValue":{ "shape":"String", - "documentation":"

Description of the license configuration.

" - }, - "LicenseCountingType":{ - "shape":"LicenseCountingType", - "documentation":"

Dimension used to track the license inventory.

" - }, - "LicenseCount":{ - "shape":"BoxLong", - "documentation":"

Number of licenses managed by the license configuration.

" - }, - "LicenseCountHardLimit":{ - "shape":"BoxBoolean", - "documentation":"

Indicates whether hard or soft license enforcement is used. Exceeding a hard limit blocks the launch of new instances.

" - }, - "LicenseRules":{ - "shape":"StringList", - "documentation":"

License rules. The syntax is #name=value (for example, #allowedTenancy=EC2-DedicatedHost). The available rules vary by dimension, as follows.

  • Cores dimension: allowedTenancy | licenseAffinityToHost | maximumCores | minimumCores

  • Instances dimension: allowedTenancy | maximumCores | minimumCores | maximumSockets | minimumSockets | maximumVcpus | minimumVcpus

  • Sockets dimension: allowedTenancy | licenseAffinityToHost | maximumSockets | minimumSockets

  • vCPUs dimension: allowedTenancy | honorVcpuOptimization | maximumVcpus | minimumVcpus

The unit for licenseAffinityToHost is days and the range is 1 to 180. The possible values for allowedTenancy are EC2-Default, EC2-DedicatedHost, and EC2-DedicatedInstance. The possible values for honorVcpuOptimization are True and False.

" + "documentation":"

Resource usage consumed.

" }, - "Tags":{ - "shape":"TagList", - "documentation":"

Tags to add to the license configuration.

" + "MaxCount":{ + "shape":"String", + "documentation":"

Maximum entitlement usage count.

" }, - "ProductInformationList":{ - "shape":"ProductInformationList", - "documentation":"

Product information.

" + "Unit":{ + "shape":"EntitlementDataUnit", + "documentation":"

Entitlement usage unit.

" } - } + }, + "documentation":"

Usage associated with an entitlement resource.

" }, - "CreateLicenseConfigurationResponse":{ + "EntitlementUsageList":{ + "type":"list", + "member":{"shape":"EntitlementUsage"} + }, + "ExtendLicenseConsumptionRequest":{ "type":"structure", + "required":["LicenseConsumptionToken"], "members":{ - "LicenseConfigurationArn":{ + "LicenseConsumptionToken":{ "shape":"String", - "documentation":"

Amazon Resource Name (ARN) of the license configuration.

" + "documentation":"

License consumption token.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request. Provides an error response if you do not have the required permissions.
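For example (AWS SDK for Java v2 sketch, codegen naming assumed), a permissions-only dry run can precede the real extension:

    import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
    import software.amazon.awssdk.services.licensemanager.model.ExtendLicenseConsumptionRequest;

    class ExtendConsumptionExample {
        static void extend(LicenseManagerClient client, String consumptionToken) {
            // Dry run: checks permissions without making the change; an error response is returned
            // (surfaced as an exception by the SDK) if the required permissions are missing.
            client.extendLicenseConsumption(ExtendLicenseConsumptionRequest.builder()
                    .licenseConsumptionToken(consumptionToken)
                    .dryRun(true)
                    .build());
            // Real call: returns a refreshed consumption token and its new expiration.
            client.extendLicenseConsumption(ExtendLicenseConsumptionRequest.builder()
                    .licenseConsumptionToken(consumptionToken)
                    .build());
        }
    }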

" } } }, - "DateTime":{"type":"timestamp"}, - "DeleteLicenseConfigurationRequest":{ + "ExtendLicenseConsumptionResponse":{ "type":"structure", - "required":["LicenseConfigurationArn"], "members":{ - "LicenseConfigurationArn":{ + "LicenseConsumptionToken":{ "shape":"String", - "documentation":"

ID of the license configuration.

" + "documentation":"

License consumption token.

" + }, + "Expiration":{ + "shape":"ISO8601DateTime", + "documentation":"

Date and time at which the license consumption expires.

" } } }, - "DeleteLicenseConfigurationResponse":{ - "type":"structure", - "members":{ - } - }, "FailedDependencyException":{ "type":"structure", "members":{ - "Message":{"shape":"Message"} + "Message":{"shape":"Message"}, + "ErrorCode":{"shape":"String"} }, "documentation":"

A dependency required to run the API is missing.

", "exception":true @@ -437,6 +1767,10 @@ "documentation":"

The request uses too many filters or too many filter values.

", "exception":true }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"} + }, "FilterName":{"type":"string"}, "FilterValue":{"type":"string"}, "FilterValues":{ @@ -447,6 +1781,52 @@ "type":"list", "member":{"shape":"Filter"} }, + "GetAccessTokenRequest":{ + "type":"structure", + "required":["Token"], + "members":{ + "Token":{ + "shape":"TokenString", + "documentation":"

Refresh token, encoded as a JWT token.

" + }, + "TokenProperties":{ + "shape":"MaxSize3StringList", + "documentation":"

Token properties to validate against those present in the JWT token.

" + } + } + }, + "GetAccessTokenResponse":{ + "type":"structure", + "members":{ + "AccessToken":{ + "shape":"TokenString", + "documentation":"

Temporary access token.

" + } + } + }, + "GetGrantRequest":{ + "type":"structure", + "required":["GrantArn"], + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the grant.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Grant version.

" + } + } + }, + "GetGrantResponse":{ + "type":"structure", + "members":{ + "Grant":{ + "shape":"Grant", + "documentation":"

Grant details.

" + } + } + }, "GetLicenseConfigurationRequest":{ "type":"structure", "required":["LicenseConfigurationArn"], @@ -526,6 +1906,48 @@ } } }, + "GetLicenseRequest":{ + "type":"structure", + "required":["LicenseArn"], + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "Version":{ + "shape":"String", + "documentation":"

License version.

" + } + } + }, + "GetLicenseResponse":{ + "type":"structure", + "members":{ + "License":{ + "shape":"License", + "documentation":"

License details.

" + } + } + }, + "GetLicenseUsageRequest":{ + "type":"structure", + "required":["LicenseArn"], + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + } + } + }, + "GetLicenseUsageResponse":{ + "type":"structure", + "members":{ + "LicenseUsage":{ + "shape":"LicenseUsage", + "documentation":"

License usage details.

" + } + } + }, "GetServiceSettingsRequest":{ "type":"structure", "members":{ @@ -544,11 +1966,11 @@ }, "OrganizationConfiguration":{ "shape":"OrganizationConfiguration", - "documentation":"

Indicates whether AWS Organizations has been integrated with License Manager for cross-account discovery.

" + "documentation":"

Indicates whether AWS Organizations is integrated with License Manager for cross-account discovery.

" }, "EnableCrossAccountsDiscovery":{ "shape":"BoxBoolean", - "documentation":"

Indicates whether cross-account discovery has been enabled.

" + "documentation":"

Indicates whether cross-account discovery is enabled.

" }, "LicenseManagerResourceShareArn":{ "shape":"String", @@ -556,6 +1978,161 @@ } } }, + "Grant":{ + "type":"structure", + "required":[ + "GrantArn", + "GrantName", + "ParentArn", + "LicenseArn", + "GranteePrincipalArn", + "HomeRegion", + "GrantStatus", + "Version", + "GrantedOperations" + ], + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the grant.

" + }, + "GrantName":{ + "shape":"String", + "documentation":"

Grant name.

" + }, + "ParentArn":{ + "shape":"Arn", + "documentation":"

Parent ARN.

" + }, + "LicenseArn":{ + "shape":"Arn", + "documentation":"

License ARN.

" + }, + "GranteePrincipalArn":{ + "shape":"Arn", + "documentation":"

The grantee principal ARN.

" + }, + "HomeRegion":{ + "shape":"String", + "documentation":"

Home Region of the grant.

" + }, + "GrantStatus":{ + "shape":"GrantStatus", + "documentation":"

Grant status.

" + }, + "StatusReason":{ + "shape":"String", + "documentation":"

Grant status reason.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Grant version.

" + }, + "GrantedOperations":{ + "shape":"AllowedOperationList", + "documentation":"

Granted operations.

" + } + }, + "documentation":"

Describes a grant.

" + }, + "GrantList":{ + "type":"list", + "member":{"shape":"Grant"} + }, + "GrantStatus":{ + "type":"string", + "enum":[ + "PENDING_WORKFLOW", + "PENDING_ACCEPT", + "REJECTED", + "ACTIVE", + "FAILED_WORKFLOW", + "DELETED", + "PENDING_DELETE", + "DISABLED" + ] + }, + "GrantedLicense":{ + "type":"structure", + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "LicenseName":{ + "shape":"String", + "documentation":"

License name.

" + }, + "ProductName":{ + "shape":"String", + "documentation":"

Product name.

" + }, + "ProductSKU":{ + "shape":"String", + "documentation":"

Product SKU.

" + }, + "Issuer":{ + "shape":"IssuerDetails", + "documentation":"

Granted license issuer.

" + }, + "HomeRegion":{ + "shape":"String", + "documentation":"

Home Region of the granted license.

" + }, + "Status":{ + "shape":"LicenseStatus", + "documentation":"

Granted license status.

" + }, + "Validity":{ + "shape":"DatetimeRange", + "documentation":"

Date and time range during which the granted license is valid, in ISO8601-UTC format.

" + }, + "Beneficiary":{ + "shape":"String", + "documentation":"

Granted license beneficiary.

" + }, + "Entitlements":{ + "shape":"EntitlementList", + "documentation":"

License entitlements.

" + }, + "ConsumptionConfiguration":{ + "shape":"ConsumptionConfiguration", + "documentation":"

Configuration for consumption of the license.

" + }, + "LicenseMetadata":{ + "shape":"MetadataList", + "documentation":"

Granted license metadata.

" + }, + "CreateTime":{ + "shape":"ISO8601DateTime", + "documentation":"

Creation time of the granted license.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Version of the granted license.

" + }, + "ReceivedMetadata":{ + "shape":"ReceivedMetadata", + "documentation":"

Granted license received metadata.

" + } + }, + "documentation":"

Describes a license that is granted to a grantee.

" + }, + "GrantedLicenseList":{ + "type":"list", + "member":{"shape":"GrantedLicense"} + }, + "ISO8601DateTime":{ + "type":"string", + "max":50, + "pattern":"^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[0-1]|0[1-9]|[1-2][0-9])T(2[0-3]|[0-1][0-9]):([0-5][0-9]):([0-5][0-9])(\\.[0-9]+)?(Z|[+-](?:2[ 0-3]|[0-1][0-9]):[0-5][0-9])+$" + }, + "IdempotencyToken":{ + "type":"string", + "max":60, + "pattern":"\\S+" + }, + "Integer":{"type":"integer"}, "InvalidParameterValueException":{ "type":"structure", "members":{ @@ -608,6 +2185,101 @@ "type":"list", "member":{"shape":"InventoryFilter"} }, + "Issuer":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Issuer name.

" + }, + "SignKey":{ + "shape":"String", + "documentation":"

Asymmetric CMK from AWS Key Management Service. The CMK must have a key usage of sign and verify, and support the RSASSA-PSS SHA-256 signing algorithm.

" + } + }, + "documentation":"

Details about the issuer of a license.

" + }, + "IssuerDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Issuer name.

" + }, + "SignKey":{ + "shape":"String", + "documentation":"

Asymmetric CMK from AWS Key Management Service. The CMK must have a key usage of sign and verify, and support the RSASSA-PSS SHA-256 signing algorithm.

" + }, + "KeyFingerprint":{ + "shape":"String", + "documentation":"

Issuer key fingerprint.

" + } + }, + "documentation":"

Details associated with the issuer of a license.

" + }, + "License":{ + "type":"structure", + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "LicenseName":{ + "shape":"String", + "documentation":"

License name.

" + }, + "ProductName":{ + "shape":"String", + "documentation":"

Product name.

" + }, + "ProductSKU":{ + "shape":"String", + "documentation":"

Product SKU.

" + }, + "Issuer":{ + "shape":"IssuerDetails", + "documentation":"

License issuer.

" + }, + "HomeRegion":{ + "shape":"String", + "documentation":"

Home Region of the license.

" + }, + "Status":{ + "shape":"LicenseStatus", + "documentation":"

License status.

" + }, + "Validity":{ + "shape":"DatetimeRange", + "documentation":"

Date and time range during which the license is valid, in ISO8601-UTC format.

" + }, + "Beneficiary":{ + "shape":"String", + "documentation":"

License beneficiary.

" + }, + "Entitlements":{ + "shape":"EntitlementList", + "documentation":"

License entitlements.

" + }, + "ConsumptionConfiguration":{ + "shape":"ConsumptionConfiguration", + "documentation":"

Configuration for consumption of the license.

" + }, + "LicenseMetadata":{ + "shape":"MetadataList", + "documentation":"

License metadata.

" + }, + "CreateTime":{ + "shape":"ISO8601DateTime", + "documentation":"

License creation time.

" + }, + "Version":{ + "shape":"String", + "documentation":"

License version.

" + } + }, + "documentation":"

Software license that is managed in AWS License Manager.

" + }, "LicenseConfiguration":{ "type":"structure", "members":{ @@ -692,6 +2364,10 @@ "AssociationTime":{ "shape":"DateTime", "documentation":"

Time when the license configuration was associated with the resource.

" + }, + "AmiAssociationScope":{ + "shape":"String", + "documentation":"

Scope of AMI associations. The possible value is cross-account.

" } }, "documentation":"

Describes an association with a license configuration.

" @@ -754,6 +2430,17 @@ "Socket" ] }, + "LicenseDeletionStatus":{ + "type":"string", + "enum":[ + "PENDING_DELETE", + "DELETED" + ] + }, + "LicenseList":{ + "type":"list", + "member":{"shape":"License"} + }, "LicenseOperationFailure":{ "type":"structure", "members":{ @@ -803,6 +2490,10 @@ "LicenseConfigurationArn":{ "shape":"String", "documentation":"

Amazon Resource Name (ARN) of the license configuration.

" + }, + "AmiAssociationScope":{ + "shape":"String", + "documentation":"

Scope of AMI associations. The possible value is cross-account.

" } }, "documentation":"

Details for associating a license configuration with a resource.

" @@ -811,6 +2502,28 @@ "type":"list", "member":{"shape":"LicenseSpecification"} }, + "LicenseStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "PENDING_AVAILABLE", + "DEACTIVATED", + "SUSPENDED", + "EXPIRED", + "PENDING_DELETE", + "DELETED" + ] + }, + "LicenseUsage":{ + "type":"structure", + "members":{ + "EntitlementUsages":{ + "shape":"EntitlementUsageList", + "documentation":"

License entitlement usages.

" + } + }, + "documentation":"

Describes the entitlement usage associated with a license.

" + }, "LicenseUsageException":{ "type":"structure", "members":{ @@ -850,6 +2563,40 @@ } } }, + "ListDistributedGrantsRequest":{ + "type":"structure", + "members":{ + "GrantArns":{ + "shape":"ArnList", + "documentation":"

Amazon Resource Names (ARNs) of the grants.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters to scope the results. The following filters are supported:

  • LicenseARN

  • Status

  • PrincipalARN

  • ParentARN
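A filtered listing might look like this AWS SDK for Java v2 sketch (the Filter name/values accessors are assumed from the existing Filter shape, which is unchanged by this diff):

    import software.amazon.awssdk.services.licensemanager.LicenseManagerClient;
    import software.amazon.awssdk.services.licensemanager.model.Filter;
    import software.amazon.awssdk.services.licensemanager.model.ListDistributedGrantsRequest;

    class ListGrantsExample {
        static void listActiveGrants(LicenseManagerClient client) {
            client.listDistributedGrants(ListDistributedGrantsRequest.builder()
                    .filters(Filter.builder().name("Status").values("ACTIVE").build())
                    .maxResults(25)
                    .build())
                    .grants()
                    .forEach(grant -> System.out.println(grant.grantArn() + " -> " + grant.grantStatusAsString()));
        }
    }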

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Maximum number of results to return in a single call.

" + } + } + }, + "ListDistributedGrantsResponse":{ + "type":"structure", + "members":{ + "Grants":{ + "shape":"GrantList", + "documentation":"

Distributed grant details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + } + } + }, "ListFailuresForLicenseConfigurationOperationsRequest":{ "type":"structure", "required":["LicenseConfigurationArn"], @@ -946,6 +2693,139 @@ } } }, + "ListLicenseVersionsRequest":{ + "type":"structure", + "required":["LicenseArn"], + "members":{ + "LicenseArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Maximum number of results to return in a single call.

" + } + } + }, + "ListLicenseVersionsResponse":{ + "type":"structure", + "members":{ + "Licenses":{ + "shape":"LicenseList", + "documentation":"

License details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + } + } + }, + "ListLicensesRequest":{ + "type":"structure", + "members":{ + "LicenseArns":{ + "shape":"ArnList", + "documentation":"

Amazon Resource Names (ARNs) of the licenses.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters to scope the results. The following filters are supported:

  • Beneficiary

  • ProductSKU

  • KeyFingerprint

  • Status

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Maximum number of results to return in a single call.

" + } + } + }, + "ListLicensesResponse":{ + "type":"structure", + "members":{ + "Licenses":{ + "shape":"LicenseList", + "documentation":"

License details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + } + } + }, + "ListReceivedGrantsRequest":{ + "type":"structure", + "members":{ + "GrantArns":{ + "shape":"ArnList", + "documentation":"

Amazon Resource Names (ARNs) of the grants.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters to scope the results. The following filters are supported:

  • LicenseARN

  • Status

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Maximum number of results to return in a single call.

" + } + } + }, + "ListReceivedGrantsResponse":{ + "type":"structure", + "members":{ + "Grants":{ + "shape":"GrantList", + "documentation":"

Received grant details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + } + } + }, + "ListReceivedLicensesRequest":{ + "type":"structure", + "members":{ + "LicenseArns":{ + "shape":"ArnList", + "documentation":"

Amazon Resource Names (ARNs) of the licenses.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters to scope the results. The following filters are supported:

  • ProductSKU

  • Status

  • KeyFingerprint

  • Issuer

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Maximum number of results to return in a single call.

" + } + } + }, + "ListReceivedLicensesResponse":{ + "type":"structure", + "members":{ + "Licenses":{ + "shape":"GrantedLicenseList", + "documentation":"

Received license details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + } + } + }, "ListResourceInventoryRequest":{ "type":"structure", "members":{ @@ -995,6 +2875,40 @@ } } }, + "ListTokensRequest":{ + "type":"structure", + "members":{ + "TokenIds":{ + "shape":"StringList", + "documentation":"

Token IDs.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters to scope the results. The following filter is supported:

  • licenseArns

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Maximum number of results to return in a single call.

" + } + } + }, + "ListTokensResponse":{ + "type":"structure", + "members":{ + "Tokens":{ + "shape":"TokenList", + "documentation":"

Received token details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Token for the next set of results.

" + } + } + }, "ListUsageForLicenseConfigurationRequest":{ "type":"structure", "required":["LicenseConfigurationArn"], @@ -1013,7 +2927,7 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

Filters to scope the results. The following filters and logical operators are supported:

  • resourceArn - The ARN of the license configuration resource. Logical operators are EQUALS | NOT_EQUALS.

  • resourceType - The resource type (EC2_INSTANCE | EC2_HOST | EC2_AMI | SYSTEMS_MANAGER_MANAGED_INSTANCE). Logical operators are EQUALS | NOT_EQUALS.

  • resourceAccount - The ID of the account that owns the resource. Logical operators are EQUALS | NOT_EQUALS.

" + "documentation":"

Filters to scope the results. The following filters and logical operators are supported:

  • resourceArn - The ARN of the license configuration resource. Logical operators are EQUALS | NOT_EQUALS.

  • resourceType - The resource type (EC2_INSTANCE | EC2_HOST | EC2_AMI | SYSTEMS_MANAGER_MANAGED_INSTANCE). Logical operators are EQUALS | NOT_EQUALS.

  • resourceAccount - The ID of the account that owns the resource. Logical operators are EQUALS | NOT_EQUALS.

" } } }, @@ -1030,6 +2944,8 @@ } } }, + "Location":{"type":"string"}, + "Long":{"type":"long"}, "ManagedResourceSummary":{ "type":"structure", "members":{ @@ -1048,25 +2964,43 @@ "type":"list", "member":{"shape":"ManagedResourceSummary"} }, + "MaxSize100":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxSize3StringList":{ + "type":"list", + "member":{"shape":"String"}, + "max":3 + }, "Message":{"type":"string"}, "Metadata":{ "type":"structure", "members":{ "Name":{ "shape":"String", - "documentation":"

Reserved.

" + "documentation":"

The key name.

" }, "Value":{ "shape":"String", - "documentation":"

Reserved.

" + "documentation":"

The value.

" } }, - "documentation":"

Reserved.

" + "documentation":"

Describes key/value pairs.

" }, "MetadataList":{ "type":"list", "member":{"shape":"Metadata"} }, + "NoEntitlementsAllowedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

There are no entitlements found for this license, or the entitlement maximum count is reached.

", + "exception":true + }, "OrganizationConfiguration":{ "type":"structure", "required":["EnableIntegration"], @@ -1078,6 +3012,12 @@ }, "documentation":"

Configuration information for AWS Organizations.

" }, + "PrincipalArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":1, + "min":1 + }, "ProductInformation":{ "type":"structure", "required":[ @@ -1127,6 +3067,17 @@ "type":"list", "member":{"shape":"ProductInformation"} }, + "ProvisionalConfiguration":{ + "type":"structure", + "required":["MaxTimeToLiveInMinutes"], + "members":{ + "MaxTimeToLiveInMinutes":{ + "shape":"BoxInteger", + "documentation":"

Maximum time for the provisional configuration, in minutes.

" + } + }, + "documentation":"

Details about a provisional configuration.

" + }, "RateLimitExceededException":{ "type":"structure", "members":{ @@ -1135,6 +3086,76 @@ "documentation":"

Too many requests have been submitted. Try again after a brief wait.

", "exception":true }, + "ReceivedMetadata":{ + "type":"structure", + "members":{ + "ReceivedStatus":{ + "shape":"ReceivedStatus", + "documentation":"

Received status.

" + }, + "AllowedOperations":{ + "shape":"AllowedOperationList", + "documentation":"

Allowed operations.

" + } + }, + "documentation":"

Metadata associated with received licenses and grants.

" + }, + "ReceivedStatus":{ + "type":"string", + "enum":[ + "PENDING_WORKFLOW", + "PENDING_ACCEPT", + "REJECTED", + "ACTIVE", + "FAILED_WORKFLOW", + "DELETED", + "DISABLED" + ] + }, + "RedirectException":{ + "type":"structure", + "members":{ + "Location":{"shape":"Location"}, + "Message":{"shape":"Message"} + }, + "documentation":"

This is not the correct Region for the resource. Try again.

", + "exception":true + }, + "RejectGrantRequest":{ + "type":"structure", + "required":["GrantArn"], + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the grant.

" + } + } + }, + "RejectGrantResponse":{ + "type":"structure", + "members":{ + "GrantArn":{ + "shape":"Arn", + "documentation":"

Grant ARN.

" + }, + "Status":{ + "shape":"GrantStatus", + "documentation":"

Grant status.

" + }, + "Version":{ + "shape":"String", + "documentation":"

Grant version.

" + } + } + }, + "RenewType":{ + "type":"string", + "enum":[ + "None", + "Weekly", + "Monthly" + ] + }, "ResourceInventory":{ "type":"structure", "members":{ @@ -1177,6 +3198,14 @@ "documentation":"

Your resource limits have been exceeded.

", "exception":true }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The resource cannot be found.

", + "exception":true + }, "ResourceType":{ "type":"string", "enum":[ @@ -1196,6 +3225,10 @@ "exception":true, "fault":true }, + "SignedToken":{ + "type":"string", + "min":4096 + }, "String":{"type":"string"}, "StringList":{ "type":"list", @@ -1245,6 +3278,61 @@ "members":{ } }, + "TokenData":{ + "type":"structure", + "members":{ + "TokenId":{ + "shape":"String", + "documentation":"

Token ID.

" + }, + "TokenType":{ + "shape":"String", + "documentation":"

Type of token generated. The supported value is REFRESH_TOKEN.

" + }, + "LicenseArn":{ + "shape":"String", + "documentation":"

Amazon Resource Name (ARN) of the license.

" + }, + "ExpirationTime":{ + "shape":"ISO8601DateTime", + "documentation":"

Token expiration time, in ISO8601-UTC format.

" + }, + "TokenProperties":{ + "shape":"MaxSize3StringList", + "documentation":"

Data specified by the caller.

" + }, + "RoleArns":{ + "shape":"ArnList", + "documentation":"

Amazon Resource Names (ARNs) of the roles included in the token.

" + }, + "Status":{ + "shape":"String", + "documentation":"

Token status. The possible values are AVAILABLE and DELETED.

" + } + }, + "documentation":"

Describes a token.

" + }, + "TokenList":{ + "type":"list", + "member":{"shape":"TokenData"} + }, + "TokenString":{ + "type":"string", + "max":4096, + "pattern":"\\S+" + }, + "TokenType":{ + "type":"string", + "enum":["REFRESH_TOKEN"] + }, + "UnsupportedDigitalSignatureMethodException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The digital signature method is unsupported. Try your request again.

", + "exception":true + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -1358,6 +3446,14 @@ "type":"structure", "members":{ } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The provided input is not valid. Try your request again.

", + "exception":true } }, "documentation":" AWS License Manager

AWS License Manager makes it easier to manage licenses from software vendors across multiple AWS accounts and on-premises servers.

" diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 9426ccc4edbb..c3d7debfeef6 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/lightsail/src/main/resources/codegen-resources/service-2.json b/services/lightsail/src/main/resources/codegen-resources/service-2.json index a3f5cf8033ac..05606d936550 100644 --- a/services/lightsail/src/main/resources/codegen-resources/service-2.json +++ b/services/lightsail/src/main/resources/codegen-resources/service-2.json @@ -217,6 +217,57 @@ ], "documentation":"

Creates an email or SMS text message contact method.

A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.

" }, + "CreateContainerService":{ + "name":"CreateContainerService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateContainerServiceRequest"}, + "output":{"shape":"CreateContainerServiceResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Creates an Amazon Lightsail container service.

A Lightsail container service is a compute resource to which you can deploy containers. For more information, see Container services in Amazon Lightsail in the Lightsail Dev Guide.
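A minimal AWS SDK for Java v2 sketch of creating a service (the serviceName, power, and scale member names are assumed from the Lightsail container service shapes defined elsewhere in this model; the service name is a placeholder):

    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.ContainerServicePowerName;
    import software.amazon.awssdk.services.lightsail.model.CreateContainerServiceRequest;

    class CreateContainerServiceExample {
        static void create(LightsailClient client) {
            // Power selects per-node capacity; scale selects the number of nodes.
            client.createContainerService(CreateContainerServiceRequest.builder()
                    .serviceName("my-container-service")    // placeholder service name
                    .power(ContainerServicePowerName.NANO)
                    .scale(1)
                    .build());
        }
    }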

" + }, + "CreateContainerServiceDeployment":{ + "name":"CreateContainerServiceDeployment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateContainerServiceDeploymentRequest"}, + "output":{"shape":"CreateContainerServiceDeploymentResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Creates a deployment for your Amazon Lightsail container service.

A deployment specifies the containers that will be launched on the container service and their settings, such as the ports to open, the environment variables to apply, and the launch command to run. It also specifies the container that will serve as the public endpoint of the deployment and its settings, such as the HTTP or HTTPS port to use, and the health check configuration.

You can deploy containers to your container service using container images from a public registry like Docker Hub, or from your local machine. For more information, see Creating container images for your Amazon Lightsail container services in the Lightsail Dev Guide.
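A deployment request might be assembled like this AWS SDK for Java v2 sketch (container, endpoint, and port member names are assumed from the Lightsail container service shapes defined elsewhere in this model; the service and container names are placeholders):

    import java.util.Map;
    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.Container;
    import software.amazon.awssdk.services.lightsail.model.ContainerServiceProtocol;
    import software.amazon.awssdk.services.lightsail.model.CreateContainerServiceDeploymentRequest;
    import software.amazon.awssdk.services.lightsail.model.EndpointRequest;

    class DeploymentExample {
        static void deploy(LightsailClient client) {
            client.createContainerServiceDeployment(CreateContainerServiceDeploymentRequest.builder()
                    .serviceName("my-container-service")                  // placeholder service name
                    .containers(Map.of("web", Container.builder()
                            .image("nginx:latest")                        // public Docker Hub image
                            .environment(Map.of("APP_ENV", "production"))
                            .ports(Map.of("80", ContainerServiceProtocol.HTTP))
                            .build()))
                    // The public endpoint fronts one container/port pair from the map above.
                    .publicEndpoint(EndpointRequest.builder()
                            .containerName("web")
                            .containerPort(80)
                            .build())
                    .build());
        }
    }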

" + }, + "CreateContainerServiceRegistryLogin":{ + "name":"CreateContainerServiceRegistryLogin", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateContainerServiceRegistryLoginRequest"}, + "output":{"shape":"CreateContainerServiceRegistryLoginResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Creates a temporary set of login credentials that you can use to log in to the Docker process on your local machine. After you're logged in, you can use the native Docker commands to push your local container images to the container image registry of your Amazon Lightsail account so that you can use them with your Lightsail container service. The login credentials expire 12 hours after they are created, at which point you will need to create a new set of login credentials.

You can only push container images to the container service registry of your Lightsail account. You cannot pull container images or perform any other container image management actions on the container service registry of your Lightsail account.

After you push your container images to the container image registry of your Lightsail account, use the RegisterContainerImage action to register the pushed images to a specific Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.
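Sketch of the login flow with the AWS SDK for Java v2 (the registryLogin response member and its fields are assumed from the Lightsail model; treat the returned password as a secret):

    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.ContainerServiceRegistryLogin;
    import software.amazon.awssdk.services.lightsail.model.CreateContainerServiceRegistryLoginRequest;

    class RegistryLoginExample {
        static ContainerServiceRegistryLogin login(LightsailClient client) {
            // The request needs no parameters; the returned credentials expire after 12 hours.
            ContainerServiceRegistryLogin login = client.createContainerServiceRegistryLogin(
                    CreateContainerServiceRegistryLoginRequest.builder().build())
                    .registryLogin();
            // Typical use: docker login --username <login.username()> --password-stdin <login.registry()>,
            // piping login.password() to stdin, then docker push to the returned registry.
            return login;
        }
    }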

" + }, "CreateDisk":{ "name":"CreateDisk", "http":{ @@ -290,7 +341,7 @@ {"shape":"AccessDeniedException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates an Amazon Lightsail content delivery network (CDN) distribution.

A distribution is a globally distributed network of caching servers that improve the performance of your website or web application hosted on a Lightsail instance. For more information, see Content delivery networks in Amazon Lightsail.

" + "documentation":"

Creates an Amazon Lightsail content delivery network (CDN) distribution.

A distribution is a globally distributed network of caching servers that improve the performance of your website or web application hosted on a Lightsail instance. For more information, see Content delivery networks in Amazon Lightsail.

" }, "CreateDomain":{ "name":"CreateDomain", @@ -328,7 +379,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Creates one of the following entry records associated with the domain: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).

The create domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Creates one of the following domain name system (DNS) records in a domain DNS zone: Address (A), canonical name (CNAME), mail exchanger (MX), name server (NS), start of authority (SOA), service locator (SRV), or text (TXT).

The create domain entry operation supports tag-based access control via resource tags applied to the resource identified by domain name. For more information, see the Lightsail Dev Guide.

" }, "CreateInstanceSnapshot":{ "name":"CreateInstanceSnapshot", @@ -572,6 +623,40 @@ ], "documentation":"

Deletes a contact method.

A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.

" }, + "DeleteContainerImage":{ + "name":"DeleteContainerImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteContainerImageRequest"}, + "output":{"shape":"DeleteContainerImageResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Deletes a container image that is registered to your Amazon Lightsail container service.

" + }, + "DeleteContainerService":{ + "name":"DeleteContainerService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteContainerServiceRequest"}, + "output":{"shape":"DeleteContainerServiceResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Deletes your Amazon Lightsail container service.

" + }, "DeleteDisk":{ "name":"DeleteDisk", "http":{ @@ -1114,6 +1199,123 @@ ], "documentation":"

Returns information about the configured contact methods. Specify a protocol in your request to return information about a specific contact method.

A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.

" }, + "GetContainerAPIMetadata":{ + "name":"GetContainerAPIMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContainerAPIMetadataRequest"}, + "output":{"shape":"GetContainerAPIMetadataResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns information about Amazon Lightsail containers, such as the current version of the Lightsail Control (lightsailctl) plugin.

" + }, + "GetContainerImages":{ + "name":"GetContainerImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContainerImagesRequest"}, + "output":{"shape":"GetContainerImagesResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the container images that are registered to your Amazon Lightsail container service.

If you created a deployment on your Lightsail container service that uses container images from a public registry like Docker Hub, those images are not returned as part of this action. Those images are not registered to your Lightsail container service.

" + }, + "GetContainerLog":{ + "name":"GetContainerLog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContainerLogRequest"}, + "output":{"shape":"GetContainerLogResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the log events of a container of your Amazon Lightsail container service.

If your container service has more than one node (i.e., a scale greater than 1), then the log events that are returned for the specified container are merged from all nodes on your container service.

Container logs are retained for a certain amount of time. For more information, see Amazon Lightsail endpoints and quotas in the AWS General Reference.
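A hedged sketch of reading these logs with the generated v2 client follows: it filters for the ERROR term and walks the pageToken/nextPageToken pair until no further pages remain. The response class name GetContainerLogResponse follows the SDK's usual naming and, like the service and container names, is an assumption rather than something stated in this model.

    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.ContainerServiceLogEvent;
    import software.amazon.awssdk.services.lightsail.model.GetContainerLogRequest;
    import software.amazon.awssdk.services.lightsail.model.GetContainerLogResponse;

    public class ContainerLogSketch {
        public static void main(String[] args) {
            try (LightsailClient lightsail = LightsailClient.create()) {
                String pageToken = null;
                do {
                    // Return only log events that contain the ERROR term.
                    GetContainerLogResponse page = lightsail.getContainerLog(
                            GetContainerLogRequest.builder()
                                    .serviceName("my-service")   // illustrative names
                                    .containerName("web")
                                    .filterPattern("ERROR")
                                    .pageToken(pageToken)
                                    .build());

                    for (ContainerServiceLogEvent event : page.logEvents()) {
                        System.out.println(event.createdAt() + " " + event.message());
                    }

                    // A next page token is only present when more results are available.
                    pageToken = page.nextPageToken();
                } while (pageToken != null);
            }
        }
    }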

" + }, + "GetContainerServiceDeployments":{ + "name":"GetContainerServiceDeployments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContainerServiceDeploymentsRequest"}, + "output":{"shape":"GetContainerServiceDeploymentsResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the deployments for your Amazon Lightsail container service.

A deployment specifies the settings, such as the ports and launch command, of containers that are deployed to your container service.

The deployments are ordered by version in ascending order. The newest version is listed at the top of the response.

A set number of deployments are kept before the oldest one is replaced with the newest one. For more information, see Amazon Lightsail endpoints and quotas in the AWS General Reference.

" + }, + "GetContainerServiceMetricData":{ + "name":"GetContainerServiceMetricData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContainerServiceMetricDataRequest"}, + "output":{"shape":"GetContainerServiceMetricDataResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the data points of a specific metric of your Amazon Lightsail container service.

Metrics report the utilization of your resources. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.

" + }, + "GetContainerServicePowers":{ + "name":"GetContainerServicePowers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContainerServicePowersRequest"}, + "output":{"shape":"GetContainerServicePowersResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns the list of powers that can be specified for your Amazon Lightsail container services.

The power specifies the amount of memory, the number of vCPUs, and the base price of the container service.

" + }, + "GetContainerServices":{ + "name":"GetContainerServices", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetContainerServicesRequest"}, + "output":{"shape":"ContainerServicesListResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Returns information about one or more of your Amazon Lightsail container services.

" + }, "GetDisk":{ "name":"GetDisk", "http":{ @@ -2078,6 +2280,23 @@ ], "documentation":"

Restarts a specific database in Amazon Lightsail.

The reboot relational database operation supports tag-based access control via resource tags applied to the resource identified by relationalDatabaseName. For more information, see the Lightsail Dev Guide.

" }, + "RegisterContainerImage":{ + "name":"RegisterContainerImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterContainerImageRequest"}, + "output":{"shape":"RegisterContainerImageResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Registers a container image to your Amazon Lightsail container service.

This action is not required if you install and use the Lightsail Control (lightsailctl) plugin to push container images to your Lightsail container service. For more information, see Pushing and managing container images on your Amazon Lightsail container services in the Lightsail Dev Guide.

" + }, "ReleaseStaticIp":{ "name":"ReleaseStaticIp", "http":{ @@ -2284,6 +2503,23 @@ ], "documentation":"

Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.

The untag resource operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name. For more information, see the Lightsail Dev Guide.

" }, + "UpdateContainerService":{ + "name":"UpdateContainerService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateContainerServiceRequest"}, + "output":{"shape":"UpdateContainerServiceResult"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"InvalidInputException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"UnauthenticatedException"} + ], + "documentation":"

Updates the configuration of your Amazon Lightsail container service, such as its power, scale, and public domain names.

" + }, "UpdateDistribution":{ "name":"UpdateDistribution", "http":{ @@ -3274,48 +3510,438 @@ "shape":"NonEmptyString", "documentation":"

The Amazon Resource Name (ARN) of the contact method.

" }, - "createdAt":{ + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the contact method was created.

" + }, + "location":{"shape":"ResourceLocation"}, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The Lightsail resource type (e.g., ContactMethod).

" + }, + "supportCode":{ + "shape":"string", + "documentation":"

The support code. Include this code in your email to support when you have questions about your Lightsail contact method. This code enables our support team to look up your Lightsail information more easily.

" + } + }, + "documentation":"

Describes a contact method.

A contact method is a way to send you notifications. For more information, see Notifications in Amazon Lightsail.

" + }, + "ContactMethodStatus":{ + "type":"string", + "enum":[ + "PendingVerification", + "Valid", + "Invalid" + ] + }, + "ContactMethodVerificationProtocol":{ + "type":"string", + "enum":["Email"] + }, + "ContactMethodsList":{ + "type":"list", + "member":{"shape":"ContactMethod"} + }, + "ContactProtocol":{ + "type":"string", + "enum":[ + "Email", + "SMS" + ] + }, + "ContactProtocolsList":{ + "type":"list", + "member":{"shape":"ContactProtocol"} + }, + "Container":{ + "type":"structure", + "members":{ + "image":{ + "shape":"string", + "documentation":"

The name of the image used for the container.

Container images sourced from your Lightsail container service, which are registered and stored on your service, start with a colon (:). For example, :container-service-1.mystaticwebsite.1. Container images sourced from a public registry like Docker Hub don't start with a colon. For example, nginx:latest or nginx.

" + }, + "command":{ + "shape":"StringList", + "documentation":"

The launch command for the container.

" + }, + "environment":{ + "shape":"Environment", + "documentation":"

The environment variables of the container.

" + }, + "ports":{ + "shape":"PortMap", + "documentation":"

The open firewall ports of the container.

" + } + }, + "documentation":"

Describes the settings of a container that will be launched, or that is launched, to an Amazon Lightsail container service.

" + }, + "ContainerImage":{ + "type":"structure", + "members":{ + "image":{ + "shape":"string", + "documentation":"

The name of the container image.

" + }, + "digest":{ + "shape":"string", + "documentation":"

The digest of the container image.

" + }, + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the container image was created.

" + } + }, + "documentation":"

Describes a container image that is registered to an Amazon Lightsail container service.

" + }, + "ContainerImageList":{ + "type":"list", + "member":{"shape":"ContainerImage"} + }, + "ContainerLabel":{ + "type":"string", + "max":53, + "min":1, + "pattern":"^[a-z0-9]{1,2}|[a-z0-9][a-z0-9-]+[a-z0-9]$" + }, + "ContainerMap":{ + "type":"map", + "key":{"shape":"ContainerName"}, + "value":{"shape":"Container"} + }, + "ContainerName":{ + "type":"string", + "max":53, + "min":1, + "pattern":"^[a-z0-9]{1,2}|[a-z0-9][a-z0-9-]+[a-z0-9]$" + }, + "ContainerService":{ + "type":"structure", + "members":{ + "containerServiceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service.

" + }, + "arn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the container service.

" + }, + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the container service was created.

" + }, + "location":{ + "shape":"ResourceLocation", + "documentation":"

An object that describes the location of the container service, such as the AWS Region and Availability Zone.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The Lightsail resource type of the container service (i.e., ContainerService).

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + }, + "power":{ + "shape":"ContainerServicePowerName", + "documentation":"

The power specification of the container service.

The power specifies the amount of RAM, the number of vCPUs, and the base price of the container service.

" + }, + "powerId":{ + "shape":"string", + "documentation":"

The ID of the power of the container service.

" + }, + "state":{ + "shape":"ContainerServiceState", + "documentation":"

The current state of the container service.

The state can be:

  • Pending - The container service is being created.

  • Ready - The container service is created but does not have a container deployment.

  • Disabled - The container service is disabled.

  • Updating - The container service capacity or other setting is being updated.

  • Deploying - The container service is launching a container deployment.

  • Running - The container service is created and it has a container deployment.

" + }, + "scale":{ + "shape":"ContainerServiceScale", + "documentation":"

The scale specification of the container service.

The scale specifies the allocated compute nodes of the container service.

" + }, + "currentDeployment":{ + "shape":"ContainerServiceDeployment", + "documentation":"

An object that describes the current container deployment of the container service.

" + }, + "nextDeployment":{ + "shape":"ContainerServiceDeployment", + "documentation":"

An object that describes the next deployment of the container service.

This value is null when there is no deployment in a pending state.

" + }, + "isDisabled":{ + "shape":"boolean", + "documentation":"

A Boolean value indicating whether the container service is disabled.

" + }, + "principalArn":{ + "shape":"string", + "documentation":"

The principal ARN of the container service.

The principal ARN can be used to create a trust relationship between your standard AWS account and your Lightsail container service. This allows you to give your service permission to access resources in your standard AWS account.

" + }, + "privateDomainName":{ + "shape":"string", + "documentation":"

The private domain name of the container service.

The private domain name is accessible only by other resources within the default virtual private cloud (VPC) of your Lightsail account.

" + }, + "publicDomainNames":{ + "shape":"ContainerServicePublicDomains", + "documentation":"

The public domain name of the container service, such as example.com and www.example.com.

You can specify up to four public domain names for a container service. The domain names that you specify are used when you create a deployment with a container configured as the public endpoint of your container service.

If you don't specify public domain names, then you can use the default domain of the container service.

You must create and validate an SSL/TLS certificate before you can use public domain names with your container service. Use the CreateCertificate action to create a certificate for the public domain names you want to use with your container service.

See CreateContainerService or UpdateContainerService for information about how to specify public domain names for your Lightsail container service.

" + }, + "url":{ + "shape":"string", + "documentation":"

The publicly accessible URL of the container service.

If no public endpoint is specified in the currentDeployment, this URL returns a 404 response.

" + } + }, + "documentation":"

Describes an Amazon Lightsail container service.

" + }, + "ContainerServiceDeployment":{ + "type":"structure", + "members":{ + "version":{ + "shape":"integer", + "documentation":"

The version number of the deployment.

" + }, + "state":{ + "shape":"ContainerServiceDeploymentState", + "documentation":"

The state of the deployment.

A deployment can be in one of the following states:

  • Activating - The deployment is being created.

  • Active - The deployment was successfully created, and it's currently running on the container service. The container service can have only one deployment in an active state at a time.

  • Inactive - The deployment was previously successfully created, but it is not currently running on the container service.

  • Failed - The deployment failed. Use the GetContainerLog action to view the log events for the containers in the deployment to try to determine the reason for the failure.

" + }, + "containers":{ + "shape":"ContainerMap", + "documentation":"

An object that describes the configuration for the containers of the deployment.

" + }, + "publicEndpoint":{ + "shape":"ContainerServiceEndpoint", + "documentation":"

An object that describes the endpoint of the deployment.

" + }, + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the deployment was created.

" + } + }, + "documentation":"

Describes a container deployment configuration of an Amazon Lightsail container service.

A deployment specifies the settings, such as the ports and launch command, of containers that are deployed to your container service.

" + }, + "ContainerServiceDeploymentList":{ + "type":"list", + "member":{"shape":"ContainerServiceDeployment"} + }, + "ContainerServiceDeploymentRequest":{ + "type":"structure", + "members":{ + "containers":{ + "shape":"ContainerMap", + "documentation":"

An object that describes the configuration for the containers of the deployment.

" + }, + "publicEndpoint":{ + "shape":"EndpointRequest", + "documentation":"

An object that describes the endpoint of the deployment.

" + } + }, + "documentation":"

Describes a container deployment configuration of an Amazon Lightsail container service.

A deployment specifies the settings, such as the ports and launch command, of containers that are deployed to your container service.

" + }, + "ContainerServiceDeploymentState":{ + "type":"string", + "enum":[ + "ACTIVATING", + "ACTIVE", + "INACTIVE", + "FAILED" + ] + }, + "ContainerServiceEndpoint":{ + "type":"structure", + "members":{ + "containerName":{ + "shape":"string", + "documentation":"

The name of the container entry of the deployment that the endpoint configuration applies to.

" + }, + "containerPort":{ + "shape":"integer", + "documentation":"

The port of the specified container to which traffic is forwarded.

" + }, + "healthCheck":{ + "shape":"ContainerServiceHealthCheckConfig", + "documentation":"

An object that describes the health check configuration of the container.

" + } + }, + "documentation":"

Describes the public endpoint configuration of a deployment of an Amazon Lightsail container service.

" + }, + "ContainerServiceHealthCheckConfig":{ + "type":"structure", + "members":{ + "healthyThreshold":{ + "shape":"integer", + "documentation":"

The number of consecutive health check successes required before moving the container to the Healthy state.

" + }, + "unhealthyThreshold":{ + "shape":"integer", + "documentation":"

The number of consecutive health check failures required before moving the container to the Unhealthy state.

" + }, + "timeoutSeconds":{ + "shape":"integer", + "documentation":"

The amount of time, in seconds, during which no response means a failed health check. You may specify between 2 and 60 seconds.

" + }, + "intervalSeconds":{ + "shape":"integer", + "documentation":"

The approximate interval, in seconds, between health checks of an individual container. You may specify between 5 and 300 seconds.

" + }, + "path":{ + "shape":"string", + "documentation":"

The path on the container on which to perform the health check.

" + }, + "successCodes":{ + "shape":"string", + "documentation":"

The HTTP codes to use when checking for a successful response from a container. You can specify values between 200 and 499.

" + } + }, + "documentation":"

Describes the health check configuration of an Amazon Lightsail container service.
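As a quick illustration of how these fields fit together, the sketch below builds a health check configuration whose values stay inside the documented ranges (timeout 2-60 seconds, interval 5-300 seconds, success codes within 200-499). The builder style and class name follow the SDK's usual code generation; the /healthz path and the specific thresholds are illustrative choices, not recommendations from this model.

    import software.amazon.awssdk.services.lightsail.model.ContainerServiceHealthCheckConfig;

    public class HealthCheckConfigSketch {
        public static void main(String[] args) {
            // Thresholds, timeout, and interval follow the ranges documented above.
            ContainerServiceHealthCheckConfig healthCheck = ContainerServiceHealthCheckConfig.builder()
                    .path("/healthz")          // illustrative health check path
                    .successCodes("200-299")   // HTTP codes treated as healthy (must fall within 200-499)
                    .healthyThreshold(2)       // consecutive successes before the container is Healthy
                    .unhealthyThreshold(2)     // consecutive failures before the container is Unhealthy
                    .timeoutSeconds(5)         // 2-60 seconds
                    .intervalSeconds(30)       // 5-300 seconds
                    .build();

            // The object is attached to an EndpointRequest (or ContainerServiceEndpoint) via healthCheck(...).
            System.out.println(healthCheck);
        }
    }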

" + }, + "ContainerServiceList":{ + "type":"list", + "member":{"shape":"ContainerService"} + }, + "ContainerServiceLogEvent":{ + "type":"structure", + "members":{ + "createdAt":{ + "shape":"IsoDate", + "documentation":"

The timestamp when the container service log event was created.

" + }, + "message":{ + "shape":"string", + "documentation":"

The message of the container service log event.

" + } + }, + "documentation":"

Describes the log events of a container of an Amazon Lightsail container service.

" + }, + "ContainerServiceLogEventList":{ + "type":"list", + "member":{"shape":"ContainerServiceLogEvent"} + }, + "ContainerServiceMetadataEntry":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"string"} + }, + "ContainerServiceMetadataEntryList":{ + "type":"list", + "member":{"shape":"ContainerServiceMetadataEntry"} + }, + "ContainerServiceMetricName":{ + "type":"string", + "enum":[ + "CPUUtilization", + "MemoryUtilization" + ] + }, + "ContainerServiceName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-z0-9]{1,2}|[a-z0-9][a-z0-9-]+[a-z0-9]$" + }, + "ContainerServicePower":{ + "type":"structure", + "members":{ + "powerId":{ + "shape":"string", + "documentation":"

The ID of the power (e.g., nano-1).

" + }, + "price":{ + "shape":"float", + "documentation":"

The monthly price of the power in USD.

" + }, + "cpuCount":{ + "shape":"float", + "documentation":"

The number of vCPUs included in the power.

" + }, + "ramSizeInGb":{ + "shape":"float", + "documentation":"

The amount of RAM (in GB) of the power.

" + }, + "name":{ + "shape":"string", + "documentation":"

The friendly name of the power (e.g., nano).

" + }, + "isActive":{ + "shape":"boolean", + "documentation":"

A Boolean value indicating whether the power is active and can be specified for container services.

" + } + }, + "documentation":"

Describes the powers that can be specified for an Amazon Lightsail container service.

The power specifies the amount of RAM, the number of vCPUs, and the base price of the container service.
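Because the monthly price of a container service is the base price of its power multiplied by its scale (see CreateContainerService below), a small sketch like the following can be used to compare the options. Class and accessor names are inferred from this model and the SDK's usual code generation; the scale of 2 is illustrative.

    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.ContainerServicePower;
    import software.amazon.awssdk.services.lightsail.model.GetContainerServicePowersRequest;

    public class ListPowersSketch {
        public static void main(String[] args) {
            int scale = 2; // illustrative node count

            try (LightsailClient lightsail = LightsailClient.create()) {
                for (ContainerServicePower power : lightsail
                        .getContainerServicePowers(GetContainerServicePowersRequest.builder().build())
                        .powers()) {
                    if (!Boolean.TRUE.equals(power.isActive())) {
                        continue; // skip powers that can no longer be specified
                    }
                    // Monthly price of a service = base price of the power x scale (number of nodes).
                    float monthly = power.price() * scale;
                    System.out.printf("%-8s %s vCPU, %s GB RAM, $%.2f/month at scale %d%n",
                            power.name(), power.cpuCount(), power.ramSizeInGb(), monthly, scale);
                }
            }
        }
    }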

" + }, + "ContainerServicePowerList":{ + "type":"list", + "member":{"shape":"ContainerServicePower"} + }, + "ContainerServicePowerName":{ + "type":"string", + "enum":[ + "nano", + "micro", + "small", + "medium", + "large", + "xlarge" + ] + }, + "ContainerServiceProtocol":{ + "type":"string", + "enum":[ + "HTTP", + "HTTPS", + "TCP", + "UDP" + ] + }, + "ContainerServicePublicDomains":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"ContainerServicePublicDomainsList"} + }, + "ContainerServicePublicDomainsList":{ + "type":"list", + "member":{"shape":"string"} + }, + "ContainerServiceRegistryLogin":{ + "type":"structure", + "members":{ + "username":{ + "shape":"string", + "documentation":"

The container service registry username to use to push container images to the container image registry of a Lightsail account.

" + }, + "password":{ + "shape":"string", + "documentation":"

The container service registry password to use to push container images to the container image registry of a Lightsail account.

" + }, + "expiresAt":{ "shape":"IsoDate", - "documentation":"

The timestamp when the contact method was created.

" - }, - "location":{"shape":"ResourceLocation"}, - "resourceType":{ - "shape":"ResourceType", - "documentation":"

The Lightsail resource type (e.g., ContactMethod).

" + "documentation":"

The timestamp when the container image registry username and password expire.

The login credentials expire 12 hours after they are created, at which point you will need to create a new set of login credentials using the CreateContainerServiceRegistryLogin action.

" }, - "supportCode":{ + "registry":{ "shape":"string", - "documentation":"

The support code. Include this code in your email to support when you have questions about your Lightsail contact method. This code enables our support team to look up your Lightsail information more easily.

" + "documentation":"

The address to use to push container images to the container image registry of a Lightsail account.

" } }, - "documentation":"

Describes a contact method.

A contact method is a way to send you notifications. For more information, see Notifications in Amazon Lightsail.

" - }, - "ContactMethodStatus":{ - "type":"string", - "enum":[ - "PendingVerification", - "Valid", - "Invalid" - ] - }, - "ContactMethodVerificationProtocol":{ - "type":"string", - "enum":["Email"] + "documentation":"

Describes the login information for the container image registry of an Amazon Lightsail account.

" }, - "ContactMethodsList":{ - "type":"list", - "member":{"shape":"ContactMethod"} + "ContainerServiceScale":{ + "type":"integer", + "max":20, + "min":1 }, - "ContactProtocol":{ + "ContainerServiceState":{ "type":"string", "enum":[ - "Email", - "SMS" + "PENDING", + "READY", + "RUNNING", + "UPDATING", + "DELETING", + "DISABLED" ] }, - "ContactProtocolsList":{ - "type":"list", - "member":{"shape":"ContactProtocol"} + "ContainerServicesListResult":{ + "type":"structure", + "members":{ + "containerServices":{ + "shape":"ContainerServiceList", + "documentation":"

An array of objects that describe one or more container services.

" + } + } }, "CookieObject":{ "type":"structure", @@ -3456,6 +4082,90 @@ } } }, + "CreateContainerServiceDeploymentRequest":{ + "type":"structure", + "required":["serviceName"], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to create the deployment.

" + }, + "containers":{ + "shape":"ContainerMap", + "documentation":"

An object that describes the settings of the containers that will be launched on the container service.

" + }, + "publicEndpoint":{ + "shape":"EndpointRequest", + "documentation":"

An object that describes the settings of the public endpoint for the container service.

" + } + } + }, + "CreateContainerServiceDeploymentResult":{ + "type":"structure", + "members":{ + "containerService":{ + "shape":"ContainerService", + "documentation":"

An object that describes a container service.

" + } + } + }, + "CreateContainerServiceRegistryLoginRequest":{ + "type":"structure", + "members":{ + } + }, + "CreateContainerServiceRegistryLoginResult":{ + "type":"structure", + "members":{ + "registryLogin":{ + "shape":"ContainerServiceRegistryLogin", + "documentation":"

An object that describes the login information for the container service registry of your Lightsail account.

" + } + } + }, + "CreateContainerServiceRequest":{ + "type":"structure", + "required":[ + "serviceName", + "power", + "scale" + ], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name for the container service.

The name that you specify for your container service will make up part of its default domain. The default domain of a container service is typically https://<ServiceName>.<RandomGUID>.<AWSRegion>.cs.amazonlightsail.com. If the name of your container service is container-service-1, and it's located in the US East (Ohio) AWS region (us-east-2), then the domain for your container service will be like the following example: https://container-service-1.ur4EXAMPLE2uq.us-east-2.cs.amazonlightsail.com

The following are the requirements for container service names:

  • Must be unique within each AWS Region in your Lightsail account.

  • Must contain 1 to 63 characters.

  • Must contain only alphanumeric characters and hyphens.

  • A hyphen (-) can separate words but cannot be at the start or end of the name.

" + }, + "power":{ + "shape":"ContainerServicePowerName", + "documentation":"

The power specification for the container service.

The power specifies the amount of memory, vCPUs, and base monthly cost of each node of the container service. The power and scale of a container service make up its configured capacity. To determine the monthly price of your container service, multiply the base price of the power by the scale (the number of nodes) of the service.

Use the GetContainerServicePowers action to get a list of power options that you can specify using this parameter, and their base monthly cost.

" + }, + "scale":{ + "shape":"ContainerServiceScale", + "documentation":"

The scale specification for the container service.

The scale specifies the allocated compute nodes of the container service. The power and scale of a container service make up its configured capacity. To determine the monthly price of your container service, multiply the base price of the power by the scale (the number of nodes) of the service.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

The tag keys and optional values for the container service.

For more information about tags in Lightsail, see the Lightsail Dev Guide.

" + }, + "publicDomainNames":{ + "shape":"ContainerServicePublicDomains", + "documentation":"

The public domain names to use with the container service, such as example.com and www.example.com.

You can specify up to four public domain names for a container service. The domain names that you specify are used when you create a deployment with a container configured as the public endpoint of your container service.

If you don't specify public domain names, then you can use the default domain of the container service.

You must create and validate an SSL/TLS certificate before you can use public domain names with your container service. Use the CreateCertificate action to create a certificate for the public domain names you want to use with your container service.

You can specify public domain names using a string to array map as shown in the example later on this page.

" + }, + "deployment":{ + "shape":"ContainerServiceDeploymentRequest", + "documentation":"

An object that describes a deployment for the container service.

A deployment specifies the containers that will be launched on the container service and their settings, such as the ports to open, the environment variables to apply, and the launch command to run. It also specifies the container that will serve as the public endpoint of the deployment and its settings, such as the HTTP or HTTPS port to use, and the health check configuration.
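Putting the required serviceName, power, and scale together with an optional first deployment, a creation call might look roughly like the sketch below. All class, enum, and builder names are inferred from the shapes in this model and the v2 SDK's usual code generation, and every concrete value (service name, image, port) is illustrative.

    import java.util.Map;
    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.Container;
    import software.amazon.awssdk.services.lightsail.model.ContainerService;
    import software.amazon.awssdk.services.lightsail.model.ContainerServiceDeploymentRequest;
    import software.amazon.awssdk.services.lightsail.model.ContainerServicePowerName;
    import software.amazon.awssdk.services.lightsail.model.ContainerServiceProtocol;
    import software.amazon.awssdk.services.lightsail.model.CreateContainerServiceRequest;
    import software.amazon.awssdk.services.lightsail.model.EndpointRequest;

    public class CreateContainerServiceSketch {
        public static void main(String[] args) {
            try (LightsailClient lightsail = LightsailClient.create()) {
                ContainerService service = lightsail.createContainerService(
                        CreateContainerServiceRequest.builder()
                                .serviceName("container-service-1")       // 1-63 chars, alphanumeric and hyphens
                                .power(ContainerServicePowerName.NANO)    // see GetContainerServicePowers
                                .scale(1)                                 // number of compute nodes (1-20)
                                // Optional: create the first deployment together with the service.
                                .deployment(ContainerServiceDeploymentRequest.builder()
                                        .containers(Map.of("web", Container.builder()
                                                .image("nginx:latest")
                                                .ports(Map.of("80", ContainerServiceProtocol.HTTP))
                                                .build()))
                                        .publicEndpoint(EndpointRequest.builder()
                                                .containerName("web")
                                                .containerPort(80)
                                                .build())
                                        .build())
                                .build())
                        .containerService();

                System.out.println("Created " + service.containerServiceName()
                        + " in state " + service.state() + "; default URL: " + service.url());
            }
        }
    }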

" + } + } + }, + "CreateContainerServiceResult":{ + "type":"structure", + "members":{ + "containerService":{ + "shape":"ContainerService", + "documentation":"

An object that describes a container service.

" + } + } + }, "CreateDiskFromSnapshotRequest":{ "type":"structure", "required":[ @@ -4185,6 +4895,43 @@ } } }, + "DeleteContainerImageRequest":{ + "type":"structure", + "required":[ + "serviceName", + "image" + ], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to delete a registered container image.

" + }, + "image":{ + "shape":"string", + "documentation":"

The name of the container image to delete from the container service.

Use the GetContainerImages action to get the name of the container images that are registered to a container service.

Container images sourced from your Lightsail container service, which are registered and stored on your service, start with a colon (:). For example, :container-service-1.mystaticwebsite.1. Container images sourced from a public registry like Docker Hub don't start with a colon. For example, nginx:latest or nginx.

" + } + } + }, + "DeleteContainerImageResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteContainerServiceRequest":{ + "type":"structure", + "required":["serviceName"], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service to delete.

" + } + } + }, + "DeleteContainerServiceResult":{ + "type":"structure", + "members":{ + } + }, "DeleteDiskRequest":{ "type":"structure", "required":["diskName"], @@ -4911,7 +5658,7 @@ }, "isAlias":{ "shape":"boolean", - "documentation":"

When true, specifies whether the domain entry is an alias used by the Lightsail load balancer. You can include an alias (A type) record in your request, which points to a load balancer DNS name and routes traffic to your load balancer

" + "documentation":"

When true, specifies whether the domain entry is an alias used by the Lightsail load balancer. You can include an alias (A type) record in your request, which points to a load balancer DNS name and routes traffic to your load balancer.

" }, "type":{ "shape":"DomainEntryType", @@ -5008,6 +5755,33 @@ } } }, + "EndpointRequest":{ + "type":"structure", + "required":[ + "containerName", + "containerPort" + ], + "members":{ + "containerName":{ + "shape":"string", + "documentation":"

The name of the container for the endpoint.

" + }, + "containerPort":{ + "shape":"integer", + "documentation":"

The port of the container to which traffic is forwarded.

" + }, + "healthCheck":{ + "shape":"ContainerServiceHealthCheckConfig", + "documentation":"

An object that describes the health check configuration of the container.

" + } + }, + "documentation":"

Describes the settings of a public endpoint for an Amazon Lightsail container service.

" + }, + "Environment":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"string"} + }, "ExportSnapshotRecord":{ "type":"structure", "members":{ @@ -5140,7 +5914,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetActiveNames request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetActiveNames request and specify the next page token using the pageToken parameter.

" } } }, @@ -5170,7 +5944,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetAlarms request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetAlarms request and specify the next page token using the pageToken parameter.

" } } }, @@ -5223,7 +5997,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetBlueprints request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetBlueprints request and specify the next page token using the pageToken parameter.

" } } }, @@ -5249,7 +6023,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetBundles request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetBundles request and specify the next page token using the pageToken parameter.

" } } }, @@ -5258,7 +6032,7 @@ "members":{ "certificateStatuses":{ "shape":"CertificateStatusList", - "documentation":"

The status of the certificates for which to return information.

For example, specify ISSUED to return only certificates with an ISSUED status.

When omitted, the response includes all of your certificates in the AWS region where the request is made, regardless of their current status.

" + "documentation":"

The status of the certificates for which to return information.

For example, specify ISSUED to return only certificates with an ISSUED status.

When omitted, the response includes all of your certificates in the AWS Region where the request is made, regardless of their current status.

" }, "includeCertificateDetails":{ "shape":"IncludeCertificateDetails", @@ -5266,7 +6040,7 @@ }, "certificateName":{ "shape":"CertificateName", - "documentation":"

The name for the certificate for which to return information.

When omitted, the response includes all of your certificates in the AWS region where the request is made.

" + "documentation":"

The name for the certificate for which to return information.

When omitted, the response includes all of your certificates in the AWS Region where the request is made.

" } } }, @@ -5297,7 +6071,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetCloudFormationStackRecords request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetCloudFormationStackRecords request and specify the next page token using the pageToken parameter.

" } } }, @@ -5319,6 +6093,177 @@ } } }, + "GetContainerAPIMetadataRequest":{ + "type":"structure", + "members":{ + } + }, + "GetContainerAPIMetadataResult":{ + "type":"structure", + "members":{ + "metadata":{ + "shape":"ContainerServiceMetadataEntryList", + "documentation":"

Metadata about Lightsail containers, such as the current version of the Lightsail Control (lightsailctl) plugin.

" + } + } + }, + "GetContainerImagesRequest":{ + "type":"structure", + "required":["serviceName"], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to return registered container images.

" + } + } + }, + "GetContainerImagesResult":{ + "type":"structure", + "members":{ + "containerImages":{ + "shape":"ContainerImageList", + "documentation":"

An array of objects that describe container images that are registered to the container service.

" + } + } + }, + "GetContainerLogRequest":{ + "type":"structure", + "required":[ + "serviceName", + "containerName" + ], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to get a container log.

" + }, + "containerName":{ + "shape":"string", + "documentation":"

The name of the container that is running, or that previously ran, on the container service for which to return a log.

" + }, + "startTime":{ + "shape":"IsoDate", + "documentation":"

The start of the time interval for which to get log data.

Constraints:

  • Specified in Coordinated Universal Time (UTC).

  • Specified in the Unix time format.

    For example, if you wish to use a start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the start time.

You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

" + }, + "endTime":{ + "shape":"IsoDate", + "documentation":"

The end of the time interval for which to get log data.

Constraints:

  • Specified in Coordinated Universal Time (UTC).

  • Specified in the Unix time format.

    For example, if you wish to use an end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end time.

You can convert a human-friendly time to Unix time format using a converter like Epoch converter.

" + }, + "filterPattern":{ + "shape":"string", + "documentation":"

The pattern to use to filter the returned log events to a specific term.

The following are a few examples of filter patterns that you can specify:

  • To return all log events, specify a filter pattern of \"\".

  • To exclude log events that contain the ERROR term, and return all other log events, specify a filter pattern of \"-ERROR\".

  • To return log events that contain the ERROR term, specify a filter pattern of \"ERROR\".

  • To return log events that contain both the ERROR and Exception terms, specify a filter pattern of \"ERROR Exception\".

  • To return log events that contain the ERROR or the Exception term, specify a filter pattern of \"?ERROR ?Exception\".

" + }, + "pageToken":{ + "shape":"string", + "documentation":"

The token to advance to the next page of results from your request.

To get a page token, perform an initial GetContainerLog request. If your results are paginated, the response will return a next page token that you can specify as the page token in a subsequent request.

" + } + } + }, + "GetContainerLogResult":{ + "type":"structure", + "members":{ + "logEvents":{ + "shape":"ContainerServiceLogEventList", + "documentation":"

An array of objects that describe the log events of a container.

" + }, + "nextPageToken":{ + "shape":"string", + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetContainerLog request and specify the next page token using the pageToken parameter.

" + } + } + }, + "GetContainerServiceDeploymentsRequest":{ + "type":"structure", + "required":["serviceName"], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to return deployments.

" + } + } + }, + "GetContainerServiceDeploymentsResult":{ + "type":"structure", + "members":{ + "deployments":{ + "shape":"ContainerServiceDeploymentList", + "documentation":"

An array of objects that describe deployments for a container service.

" + } + } + }, + "GetContainerServiceMetricDataRequest":{ + "type":"structure", + "required":[ + "serviceName", + "metricName", + "startTime", + "endTime", + "period", + "statistics" + ], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to get metric data.

" + }, + "metricName":{ + "shape":"ContainerServiceMetricName", + "documentation":"

The metric for which you want to return information.

Valid container service metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

  • CPUUtilization - The average percentage of compute units that are currently in use across all nodes of the container service. This metric identifies the processing power required to run containers on each node of the container service.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Percent.

  • MemoryUtilization - The average percentage of available memory that is currently in use across all nodes of the container service. This metric identifies the memory required to run containers on each node of the container service.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Percent.

" + }, + "startTime":{ + "shape":"IsoDate", + "documentation":"

The start time of the time period.

" + }, + "endTime":{ + "shape":"IsoDate", + "documentation":"

The end time of the time period.

" + }, + "period":{ + "shape":"MetricPeriod", + "documentation":"

The granularity, in seconds, of the returned data points.

All container service metric data is available in 5-minute (300 seconds) granularity.

" + }, + "statistics":{ + "shape":"MetricStatisticList", + "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum - The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum - All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average - The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount - The count, or number, of data points used for the statistical calculation.
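A sketch of a metric query that follows these constraints (a 300-second period, the Average and Maximum statistics, and the CPUUtilization metric) is shown below. The request and response class names and the MetricStatistic and ContainerServiceMetricName enum constants follow the v2 SDK's usual naming and are assumptions; the one-hour window and the service name are illustrative.

    import java.time.Instant;
    import java.time.temporal.ChronoUnit;
    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.ContainerServiceMetricName;
    import software.amazon.awssdk.services.lightsail.model.GetContainerServiceMetricDataRequest;
    import software.amazon.awssdk.services.lightsail.model.GetContainerServiceMetricDataResponse;
    import software.amazon.awssdk.services.lightsail.model.MetricDatapoint;
    import software.amazon.awssdk.services.lightsail.model.MetricStatistic;

    public class ServiceMetricsSketch {
        public static void main(String[] args) {
            Instant end = Instant.now();
            Instant start = end.minus(1, ChronoUnit.HOURS);

            try (LightsailClient lightsail = LightsailClient.create()) {
                GetContainerServiceMetricDataResponse data = lightsail.getContainerServiceMetricData(
                        GetContainerServiceMetricDataRequest.builder()
                                .serviceName("my-service")                          // illustrative name
                                .metricName(ContainerServiceMetricName.CPU_UTILIZATION)
                                .startTime(start)
                                .endTime(end)
                                .period(300)                                        // data is published at 5-minute granularity
                                .statistics(MetricStatistic.AVERAGE, MetricStatistic.MAXIMUM)
                                .build());

                for (MetricDatapoint point : data.metricData()) {
                    System.out.printf("%s avg=%.1f%% max=%.1f%%%n",
                            point.timestamp(), point.average(), point.maximum());
                }
            }
        }
    }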

" + } + } + }, + "GetContainerServiceMetricDataResult":{ + "type":"structure", + "members":{ + "metricName":{ + "shape":"ContainerServiceMetricName", + "documentation":"

The name of the metric returned.

" + }, + "metricData":{ + "shape":"MetricDatapointList", + "documentation":"

An array of objects that describe the metric data returned.

" + } + } + }, + "GetContainerServicePowersRequest":{ + "type":"structure", + "members":{ + } + }, + "GetContainerServicePowersResult":{ + "type":"structure", + "members":{ + "powers":{ + "shape":"ContainerServicePowerList", + "documentation":"

An array of objects that describe the powers that can be specified for a container service.

" + } + } + }, + "GetContainerServicesRequest":{ + "type":"structure", + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to return information.

When omitted, the response includes all of your container services in the AWS Region where the request is made.
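For completeness, a minimal sketch of listing every container service in the current Region, assuming the generated v2 client and model classes carry the names inferred from this model (including the stateAsString() accessor that the SDK normally generates alongside enum getters):

    import software.amazon.awssdk.services.lightsail.LightsailClient;
    import software.amazon.awssdk.services.lightsail.model.ContainerService;
    import software.amazon.awssdk.services.lightsail.model.GetContainerServicesRequest;

    public class ListContainerServicesSketch {
        public static void main(String[] args) {
            try (LightsailClient lightsail = LightsailClient.create()) {
                // Omitting serviceName returns every container service in the current AWS Region.
                for (ContainerService service : lightsail
                        .getContainerServices(GetContainerServicesRequest.builder().build())
                        .containerServices()) {
                    System.out.printf("%-30s %-10s %s%n",
                            service.containerServiceName(), service.stateAsString(), service.url());
                }
            }
        }
    }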

" + } + } + }, "GetDiskRequest":{ "type":"structure", "required":["diskName"], @@ -5375,7 +6320,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetDiskSnapshots request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetDiskSnapshots request and specify the next page token using the pageToken parameter.

" } } }, @@ -5397,7 +6342,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetDisks request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetDisks request and specify the next page token using the pageToken parameter.

" } } }, @@ -5555,7 +6500,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetDomains request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetDomains request and specify the next page token using the pageToken parameter.

" } } }, @@ -5577,7 +6522,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetExportSnapshotRecords request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetExportSnapshotRecords request and specify the next page token using the pageToken parameter.

" } } }, @@ -5734,7 +6679,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetInstanceSnapshots request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetInstanceSnapshots request and specify the next page token using the pageToken parameter.

" } } }, @@ -5775,7 +6720,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetInstances request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetInstances request and specify the next page token using the pageToken parameter.

" } } }, @@ -5816,7 +6761,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetKeyPairs request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetKeyPairs request and specify the next page token using the pageToken parameter.

" } } }, @@ -5931,7 +6876,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetLoadBalancers request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetLoadBalancers request and specify the next page token using the pageToken parameter.

" } } }, @@ -5982,7 +6927,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetOperationsForResource request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetOperationsForResource request and specify the next page token using the pageToken parameter.

" } } }, @@ -6004,7 +6949,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetOperations request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetOperations request and specify the next page token using the pageToken parameter.

" } } }, @@ -6017,7 +6962,7 @@ }, "includeRelationalDatabaseAvailabilityZones":{ "shape":"boolean", - "documentation":"

>A Boolean value indicating whether to also include Availability Zones for databases in your get regions request. Availability Zones are indicated with a letter (e.g., us-east-2a).

" + "documentation":"

A Boolean value indicating whether to also include Availability Zones for databases in your get regions request. Availability Zones are indicated with a letter (e.g., us-east-2a).

" } } }, @@ -6048,7 +6993,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseBlueprints request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseBlueprints request and specify the next page token using the pageToken parameter.

" } } }, @@ -6070,7 +7015,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseBundles request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseBundles request and specify the next page token using the pageToken parameter.

" } } }, @@ -6101,7 +7046,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseEvents request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseEvents request and specify the next page token using the pageToken parameter.

" } } }, @@ -6279,7 +7224,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseParameters request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseParameters request and specify the next page token using the pageToken parameter.

" } } }, @@ -6339,7 +7284,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseSnapshots request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabaseSnapshots request and specify the next page token using the pageToken parameter.

" } } }, @@ -6361,7 +7306,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabases request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetRelationalDatabases request and specify the next page token using the pageToken parameter.

" } } }, @@ -6402,7 +7347,7 @@ }, "nextPageToken":{ "shape":"string", - "documentation":"

The token to advance to the next page of resutls from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetStaticIps request and specify the next page token using the pageToken parameter.

" + "documentation":"

The token to advance to the next page of results from your request.

A next page token is not returned if there are no more results to display.

To get the next page of results, perform another GetStaticIps request and specify the next page token using the pageToken parameter.

" } } }, @@ -7896,7 +8841,14 @@ "DetachCertificateFromDistribution", "UpdateDistributionBundle", "CreateCertificate", - "DeleteCertificate" + "DeleteCertificate", + "CreateContainerService", + "UpdateContainerService", + "DeleteContainerService", + "CreateContainerServiceDeployment", + "CreateContainerServiceRegistryLogin", + "RegisterContainerImage", + "DeleteContainerImage" ] }, "Origin":{ @@ -8051,6 +9003,11 @@ "type":"list", "member":{"shape":"Port"} }, + "PortMap":{ + "type":"map", + "key":{"shape":"string"}, + "value":{"shape":"ContainerServiceProtocol"} + }, "PortState":{ "type":"string", "enum":[ @@ -8263,6 +9220,34 @@ "ap-northeast-2" ] }, + "RegisterContainerImageRequest":{ + "type":"structure", + "required":[ + "serviceName", + "label", + "digest" + ], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service for which to register a container image.

" + }, + "label":{ + "shape":"ContainerLabel", + "documentation":"

The label for the container image when it's registered to the container service.

Use a descriptive label that you can use to track the different versions of your registered container images.

Use the GetContainerImages action to return the container images registered to a Lightsail container service. The label is the <imagelabel> portion of the following image name example:

  • :container-service-1.<imagelabel>.1

If the name of your container service is mycontainerservice, and the label that you specify is mystaticwebsite, then the name of the registered container image will be :mycontainerservice.mystaticwebsite.1.

The number at the end of these image name examples represents the version of the registered container image. If you push and register another container image to the same Lightsail container service, with the same label, then the version number for the new registered container image will be 2. If you push and register another container image, the version number will be 3, and so on.

" + }, + "digest":{ + "shape":"string", + "documentation":"

The digest of the container image to be registered.

" + } + } + }, + "RegisterContainerImageResult":{ + "type":"structure", + "members":{ + "containerImage":{"shape":"ContainerImage"} + } + }, "RelationalDatabase":{ "type":"structure", "members":{ @@ -8775,6 +9760,7 @@ "ResourceType":{ "type":"string", "enum":[ + "ContainerService", "Instance", "StaticIp", "KeyPair", @@ -9123,6 +10109,41 @@ } } }, + "UpdateContainerServiceRequest":{ + "type":"structure", + "required":["serviceName"], + "members":{ + "serviceName":{ + "shape":"ContainerServiceName", + "documentation":"

The name of the container service to update.

" + }, + "power":{ + "shape":"ContainerServicePowerName", + "documentation":"

The power for the container service.

The power specifies the amount of memory, vCPUs, and base monthly cost of each node of the container service. The power and scale of a container service make up its configured capacity. To determine the monthly price of your container service, multiply the base price of the power by the scale (the number of nodes) of the service.

Use the GetContainerServicePowers action to view the specifications of each power option.

" + }, + "scale":{ + "shape":"ContainerServiceScale", + "documentation":"

The scale for the container service.

The scale specifies the allocated compute nodes of the container service. The power and scale of a container service make up its configured capacity. To determine the monthly price of your container service, multiply the base price of the power by the scale (the number of nodes) of the service.

" + }, + "isDisabled":{ + "shape":"boolean", + "documentation":"

A Boolean value to indicate whether the container service is disabled.

" + }, + "publicDomainNames":{ + "shape":"ContainerServicePublicDomains", + "documentation":"

The public domain names to use with the container service, such as example.com and www.example.com.

You can specify up to four public domain names for a container service. The domain names that you specify are used when you create a deployment with a container configured as the public endpoint of your container service.

If you don't specify public domain names, then you can use the default domain of the container service.

You must create and validate an SSL/TLS certificate before you can use public domain names with your container service. Use the CreateCertificate action to create a certificate for the public domain names you want to use with your container service.

You can specify public domain names using a string to array map as shown in the example later on this page.
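A hedged sketch of that string-to-array map with the Java SDK v2 client; the map key is assumed to be the name of the certificate created with CreateCertificate, and the value the list of domains it covers (the service, certificate, and domain names below are placeholders, not values from this model):

import java.util.List;
import java.util.Map;
import software.amazon.awssdk.services.lightsail.LightsailClient;
import software.amazon.awssdk.services.lightsail.model.UpdateContainerServiceRequest;

public class AttachPublicDomains {
    public static void main(String[] args) {
        try (LightsailClient lightsail = LightsailClient.create()) {
            lightsail.updateContainerService(UpdateContainerServiceRequest.builder()
                    .serviceName("mycontainerservice") // placeholder service name
                    // String-to-array map: assumed to be certificate name -> domains it covers.
                    .publicDomainNames(Map.of(
                            "my-certificate", List.of("example.com", "www.example.com")))
                    .build());
        }
    }
}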

" + } + } + }, + "UpdateContainerServiceResult":{ + "type":"structure", + "members":{ + "containerService":{ + "shape":"ContainerService", + "documentation":"

An object that describes a container service.

" + } + } + }, "UpdateDistributionBundleRequest":{ "type":"structure", "members":{ @@ -9327,5 +10348,5 @@ "string":{"type":"string"}, "timestamp":{"type":"timestamp"} }, - "documentation":"

Amazon Lightsail is the easiest way to get started with Amazon Web Services (AWS) for developers who need to build websites or web applications. It includes everything you need to launch your project quickly – instances (virtual private servers), managed databases, SSD-based block storage, static IP addresses, load balancers, content delivery network (CDN) distributions, DNS management of registered domains, and snapshots (backups) – for a low, predictable monthly price.

You can manage your Lightsail resources using the Lightsail console, Lightsail API, AWS Command Line Interface (AWS CLI), or SDKs. For more information about Lightsail concepts and tasks, see the Lightsail Dev Guide.

This API Reference provides detailed information about the actions, data types, parameters, and errors of the Lightsail service. For more information about the supported AWS Regions, endpoints, and service quotas for the Lightsail service, see Amazon Lightsail Endpoints and Quotas in the AWS General Reference.

" + "documentation":"

Amazon Lightsail is the easiest way to get started with Amazon Web Services (AWS) for developers who need to build websites or web applications. It includes everything you need to launch your project quickly - instances (virtual private servers), container services, managed databases, SSD-based block storage, static IP addresses, load balancers, content delivery network (CDN) distributions, DNS management of registered domains, and resource snapshots (backups) - for a low, predictable monthly price.

You can manage your Lightsail resources using the Lightsail console, Lightsail API, AWS Command Line Interface (AWS CLI), or SDKs. For more information about Lightsail concepts and tasks, see the Lightsail Dev Guide.

This API Reference provides detailed information about the actions, data types, parameters, and errors of the Lightsail service. For more information about the supported AWS Regions, endpoints, and service quotas of the Lightsail service, see Amazon Lightsail Endpoints and Quotas in the AWS General Reference.

" } diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml new file mode 100644 index 000000000000..3d98ff7ea80f --- /dev/null +++ b/services/lookoutvision/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + lookoutvision + AWS Java SDK :: Services :: Lookout Vision + The AWS Java SDK for Lookout Vision module holds the client classes that are used for + communicating with Lookout Vision. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.lookoutvision + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/lookoutvision/src/main/resources/codegen-resources/paginators-1.json b/services/lookoutvision/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..7f89a609b333 --- /dev/null +++ b/services/lookoutvision/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListDatasetEntries": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DatasetEntries" + }, + "ListModels": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Models" + }, + "ListProjects": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Projects" + } + } +} diff --git a/services/lookoutvision/src/main/resources/codegen-resources/service-2.json b/services/lookoutvision/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..eda65e756d73 --- /dev/null +++ b/services/lookoutvision/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1570 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-20", + "endpointPrefix":"lookoutvision", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Lookout for Vision", + "serviceId":"LookoutVision", + "signatureVersion":"v4", + "signingName":"lookoutvision", + "uid":"lookoutvision-2020-11-20" + }, + "operations":{ + "CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/datasets", + "responseCode":202 + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a new dataset in an Amazon Lookout for Vision project. CreateDataset can create a training or a test dataset from a valid dataset source (DatasetSource).

If you want a single dataset project, specify train for the value of DatasetType.

To have a project with separate training and test datasets, call CreateDataset twice. On the first call, specify train for the value of DatasetType. On the second call, specify test for the value of DatasetType.
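For illustration only, a sketch of that two-call pattern with the generated LookoutVisionClient from this model; the project name, bucket, and manifest keys are placeholders:

import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
import software.amazon.awssdk.services.lookoutvision.model.CreateDatasetRequest;
import software.amazon.awssdk.services.lookoutvision.model.DatasetGroundTruthManifest;
import software.amazon.awssdk.services.lookoutvision.model.DatasetSource;
import software.amazon.awssdk.services.lookoutvision.model.InputS3Object;

public class CreateTrainAndTestDatasets {
    static void createDataset(LookoutVisionClient client, String datasetType, String manifestKey) {
        client.createDataset(CreateDatasetRequest.builder()
                .projectName("my-project")                        // placeholder project name
                .datasetType(datasetType)                         // "train" or "test"
                .datasetSource(DatasetSource.builder()
                        .groundTruthManifest(DatasetGroundTruthManifest.builder()
                                .s3Object(InputS3Object.builder()
                                        .bucket("my-manifest-bucket") // placeholder bucket
                                        .key(manifestKey)
                                        .build())
                                .build())
                        .build())
                .build());
    }

    public static void main(String[] args) {
        try (LookoutVisionClient client = LookoutVisionClient.create()) {
            createDataset(client, "train", "manifests/train.manifest"); // first call
            createDataset(client, "test", "manifests/test.manifest");   // second call
        }
    }
}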

" + }, + "CreateModel":{ + "name":"CreateModel", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models", + "responseCode":202 + }, + "input":{"shape":"CreateModelRequest"}, + "output":{"shape":"CreateModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a new version of a model within an Amazon Lookout for Vision project. CreateModel is an asynchronous operation in which Amazon Lookout for Vision trains, tests, and evaluates a new version of a model.

To get the current status, check the Status field returned in the response from DescribeModel.

If the project has a single dataset, Amazon Lookout for Vision internally splits the dataset to create a training and a test dataset. If the project has a training and a test dataset, Lookout for Vision uses the respective datasets to train and test the model.

After training completes, the evaluation metrics are stored at the location specified in OutputConfig.
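A sketch of kicking off training and polling for completion, assuming the generated client follows the usual SDK v2 conventions; the project name and output bucket are placeholders:

import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
import software.amazon.awssdk.services.lookoutvision.model.CreateModelRequest;
import software.amazon.awssdk.services.lookoutvision.model.CreateModelResponse;
import software.amazon.awssdk.services.lookoutvision.model.DescribeModelRequest;
import software.amazon.awssdk.services.lookoutvision.model.ModelStatus;
import software.amazon.awssdk.services.lookoutvision.model.OutputConfig;
import software.amazon.awssdk.services.lookoutvision.model.S3Location;

public class TrainModel {
    public static void main(String[] args) throws InterruptedException {
        try (LookoutVisionClient client = LookoutVisionClient.create()) {
            CreateModelResponse created = client.createModel(CreateModelRequest.builder()
                    .projectName("my-project")                   // placeholder project name
                    .outputConfig(OutputConfig.builder()
                            .s3Location(S3Location.builder()
                                    .bucket("my-output-bucket")  // placeholder output bucket
                                    .prefix("training-output/")
                                    .build())
                            .build())
                    .build());
            String version = created.modelMetadata().modelVersion();

            // Training is asynchronous: poll DescribeModel until the model leaves TRAINING.
            ModelStatus status;
            do {
                Thread.sleep(60_000);
                status = client.describeModel(DescribeModelRequest.builder()
                                .projectName("my-project")
                                .modelVersion(version)
                                .build())
                        .modelDescription()
                        .status();
            } while (status == ModelStatus.TRAINING);
            System.out.println("Training finished with status " + status);
        }
    }
}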

" + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects" + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates an empty Amazon Lookout for Vision project. After you create the project, add a dataset by calling CreateDataset.

" + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"DELETE", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}", + "responseCode":202 + }, + "input":{"shape":"DeleteDatasetRequest"}, + "output":{"shape":"DeleteDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes an existing Amazon Lookout for Vision dataset.

If your project has a single dataset, you must create a new dataset before you can create a model.

If your project has a training dataset and a test dataset, consider the following.

  • If you delete the test dataset, your project reverts to a single dataset project. If you then train the model, Amazon Lookout for Vision internally splits the remaining dataset into a training and test dataset.

  • If you delete the training dataset, you must create a training dataset before you can create a model.

It might take a while to delete the dataset. To check the current status, check the Status field in the response from a call to DescribeDataset.

" + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"DELETE", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}", + "responseCode":202 + }, + "input":{"shape":"DeleteModelRequest"}, + "output":{"shape":"DeleteModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes an Amazon Lookout for Vision model. You can't delete a running model. To stop a running model, use the StopModel operation.

" + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"DELETE", + "requestUri":"/2020-11-20/projects/{projectName}" + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes an Amazon Lookout for Vision project.

To delete a project, you must first delete each version of the model associated with the project. To delete a model use the DeleteModel operation.

The training and test datasets are deleted automatically for you. The images referenced by the training and test datasets aren't deleted.

" + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes an Amazon Lookout for Vision dataset.

" + }, + "DescribeModel":{ + "name":"DescribeModel", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}" + }, + "input":{"shape":"DescribeModelRequest"}, + "output":{"shape":"DescribeModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes a version of an Amazon Lookout for Vision model.

" + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}" + }, + "input":{"shape":"DescribeProjectRequest"}, + "output":{"shape":"DescribeProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes an Amazon Lookout for Vision project.

" + }, + "DetectAnomalies":{ + "name":"DetectAnomalies", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}/detect" + }, + "input":{"shape":"DetectAnomaliesRequest"}, + "output":{"shape":"DetectAnomaliesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Detects anomalies in an image that you supply.

The response from DetectAnomalies includes a boolean prediction that the image contains one or more anomalies and a confidence value for the prediction.

Before calling DetectAnomalies, you must first start your model with the StartModel operation. You are charged for the amount of time, in minutes, that a model runs and for the number of anomaly detection units that your model uses. If you are not using a model, use the StopModel operation to stop your model.
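A sketch of a DetectAnomalies call, assuming the generated client exposes the streaming Body payload through the standard (request, RequestBody) overload; the project name and image file are placeholders:

import java.nio.file.Paths;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
import software.amazon.awssdk.services.lookoutvision.model.DetectAnomaliesRequest;
import software.amazon.awssdk.services.lookoutvision.model.DetectAnomalyResult;

public class DetectAnomaliesExample {
    public static void main(String[] args) {
        try (LookoutVisionClient client = LookoutVisionClient.create()) {
            DetectAnomalyResult result = client.detectAnomalies(
                            DetectAnomaliesRequest.builder()
                                    .projectName("my-project")   // placeholder project name
                                    .modelVersion("1")
                                    .contentType("image/jpeg")   // or image/png
                                    .build(),
                            RequestBody.fromFile(Paths.get("part-01.jpg"))) // placeholder image
                    .detectAnomalyResult();
            System.out.printf("anomalous=%s, confidence=%.3f%n",
                    result.isAnomalous(), result.confidence());
        }
    }
}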

" + }, + "ListDatasetEntries":{ + "name":"ListDatasetEntries", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}/entries" + }, + "input":{"shape":"ListDatasetEntriesRequest"}, + "output":{"shape":"ListDatasetEntriesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists the JSON Lines within a dataset. An Amazon Lookout for Vision JSON Line contains the anomaly information for a single image, including the image location and the assigned label.

" + }, + "ListModels":{ + "name":"ListModels", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects/{projectName}/models" + }, + "input":{"shape":"ListModelsRequest"}, + "output":{"shape":"ListModelsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists the versions of a model in an Amazon Lookout for Vision project.

" + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"GET", + "requestUri":"/2020-11-20/projects" + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists the Amazon Lookout for Vision projects in your AWS account.

" + }, + "StartModel":{ + "name":"StartModel", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}/start", + "responseCode":202 + }, + "input":{"shape":"StartModelRequest"}, + "output":{"shape":"StartModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Starts the running of the version of an Amazon Lookout for Vision model. Starting a model takes a while to complete. To check the current state of the model, use DescribeModel.

Once the model is running, you can detect custom labels in new images by calling DetectAnomalies.

You are charged for the amount of time that the model is running. To stop a running model, call StopModel.
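A sketch of the start/stop lifecycle with the generated client; the project name and version are placeholders:

import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
import software.amazon.awssdk.services.lookoutvision.model.StartModelRequest;
import software.amazon.awssdk.services.lookoutvision.model.StopModelRequest;

public class HostModel {
    public static void main(String[] args) {
        try (LookoutVisionClient client = LookoutVisionClient.create()) {
            // Start hosting version 1 with a single inference unit; charges accrue while it runs.
            client.startModel(StartModelRequest.builder()
                    .projectName("my-project")   // placeholder project name
                    .modelVersion("1")
                    .minInferenceUnits(1)
                    .build());

            // ... poll DescribeModel until hosting is up, then call DetectAnomalies ...

            // Stop the model when finished to stop incurring charges.
            client.stopModel(StopModelRequest.builder()
                    .projectName("my-project")
                    .modelVersion("1")
                    .build());
        }
    }
}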

" + }, + "StopModel":{ + "name":"StopModel", + "http":{ + "method":"POST", + "requestUri":"/2020-11-20/projects/{projectName}/models/{modelVersion}/stop", + "responseCode":202 + }, + "input":{"shape":"StopModelRequest"}, + "output":{"shape":"StopModelResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Stops a running model. The operation might take a while to complete. To check the current status, call DescribeModel.

" + }, + "UpdateDatasetEntries":{ + "name":"UpdateDatasetEntries", + "http":{ + "method":"PATCH", + "requestUri":"/2020-11-20/projects/{projectName}/datasets/{datasetType}/entries", + "responseCode":202 + }, + "input":{"shape":"UpdateDatasetEntriesRequest"}, + "output":{"shape":"UpdateDatasetEntriesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Adds one or more JSON Line entries to a dataset. A JSON Line includes information about an image used for training or testing an Amazon Lookout for Vision model. The following is an example JSON Line.

Updating a dataset might take a while to complete. To check the current status, call DescribeDataset and check the Status field in the response.
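A sketch of adding an entry, assuming the request's DatasetChanges blob is exposed as a changes(SdkBytes) builder member (the request shape is not shown in this hunk); the JSON Line below is a placeholder, not the full manifest schema:

import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
import software.amazon.awssdk.services.lookoutvision.model.UpdateDatasetEntriesRequest;

public class AddDatasetEntry {
    public static void main(String[] args) {
        // Placeholder JSON Line; a real entry also carries the label attributes described
        // in the Lookout for Vision Developer Guide manifest format.
        String jsonLine = "{\"source-ref\":\"s3://my-manifest-bucket/images/img-001.jpg\"}";
        try (LookoutVisionClient client = LookoutVisionClient.create()) {
            client.updateDatasetEntries(UpdateDatasetEntriesRequest.builder()
                    .projectName("my-project")   // placeholder project name
                    .datasetType("train")
                    .changes(SdkBytes.fromUtf8String(jsonLine)) // assumed name of the DatasetChanges blob member
                    .build());
        }
    }
}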

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"} + }, + "documentation":"

You are not authorized to perform the action.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AnomalyClassFilter":{ + "type":"string", + "max":10, + "min":1, + "pattern":"(normal|anomaly)" + }, + "Boolean":{"type":"boolean"}, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ExceptionString"}, + "ResourceId":{ + "shape":"ExceptionString", + "documentation":"

The ID of the resource.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource.

" + } + }, + "documentation":"

The update or deletion of a resource caused an inconsistent state.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ContentType":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "CreateDatasetRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project in which you want to create a dataset.

", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The type of the dataset. Specify train for a training dataset. Specify test for a test dataset.

" + }, + "DatasetSource":{ + "shape":"DatasetSource", + "documentation":"

The location of the manifest file that Amazon Lookout for Vision uses to create the dataset.

If you don't specify DatasetSource, an empty dataset is created and the operation synchronously returns. Later, you can add JSON Lines by calling UpdateDatasetEntries.

If you specify a value for DatasetSource, the manifest at the S3 location is validated and used to create the dataset. The call to CreateDataset is asynchronous and might take a while to complete. To find out the current status, check the value of Status returned in a call to DescribeDataset.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to CreateDataset completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from CreateDataset. In this case, safely retry your call to CreateDataset by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to CreateDataset. An idempotency token is active for 8 hours.
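One way a caller might use this, sketched with the generated client (the project name is a placeholder): choose the token up front and reuse the identical request on retry.

import java.util.UUID;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
import software.amazon.awssdk.services.lookoutvision.model.CreateDatasetRequest;

public class IdempotentCreateDataset {
    public static void main(String[] args) {
        // Choose the token yourself so the same value can be reused on a retry.
        String clientToken = UUID.randomUUID().toString();
        try (LookoutVisionClient client = LookoutVisionClient.create()) {
            CreateDatasetRequest request = CreateDatasetRequest.builder()
                    .projectName("my-project")   // placeholder project name
                    .datasetType("train")
                    .clientToken(clientToken)
                    .build();
            try {
                client.createDataset(request);
            } catch (SdkClientException e) {
                // e.g. a network outage: retrying the identical request (same ClientToken) is safe.
                client.createDataset(request);
            }
        }
    }
}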

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetMetadata":{ + "shape":"DatasetMetadata", + "documentation":"

Information about the dataset.

" + } + } + }, + "CreateModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "OutputConfig" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project in which you want to create a model version.

", + "location":"uri", + "locationName":"projectName" + }, + "Description":{ + "shape":"ModelDescription", + "documentation":"

A description for the version of the model.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to CreateModel completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from CreateModel. In this case, safely retry your call to CreateModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to CreateModel. An idempotency token is active for 8 hours.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + }, + "OutputConfig":{ + "shape":"OutputConfig", + "documentation":"

The location where Amazon Lookout for Vision saves the training results.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for encrypting the model. If this parameter is not specified, the model is encrypted by a key that AWS owns and manages.

" + } + } + }, + "CreateModelResponse":{ + "type":"structure", + "members":{ + "ModelMetadata":{ + "shape":"ModelMetadata", + "documentation":"

The response from a call to CreateModel.

" + } + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

A name for the project.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to CreateProject completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from CreateProject. In this case, safely retry your call to CreateProject by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to CreateProject. An idempotency token is active for 8 hours.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "CreateProjectResponse":{ + "type":"structure", + "members":{ + "ProjectMetadata":{ + "shape":"ProjectMetadata", + "documentation":"

Information about the project.

" + } + } + }, + "DatasetChanges":{ + "type":"blob", + "max":10485760, + "min":1 + }, + "DatasetDescription":{ + "type":"structure", + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the dataset.

" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The type of the dataset. The value train represents a training dataset or single dataset project. The value test represents a test dataset.

" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

The Unix timestamp for the time and date that the dataset was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"DateTime", + "documentation":"

The Unix timestamp for the date and time that the dataset was last updated.

" + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

The status of the dataset.

" + }, + "StatusMessage":{ + "shape":"DatasetStatusMessage", + "documentation":"

The status message for the dataset.

" + }, + "ImageStats":{ + "shape":"DatasetImageStats", + "documentation":"

Statistics about the images in the dataset.

" + } + }, + "documentation":"

The description for a dataset. For more information, see DescribeDataset.

" + }, + "DatasetEntry":{ + "type":"string", + "max":8192, + "min":2, + "pattern":"^\\{.*\\}$" + }, + "DatasetEntryList":{ + "type":"list", + "member":{"shape":"DatasetEntry"} + }, + "DatasetGroundTruthManifest":{ + "type":"structure", + "members":{ + "S3Object":{ + "shape":"InputS3Object", + "documentation":"

The S3 bucket location for the manifest file.

" + } + }, + "documentation":"

Location information about a manifest file. You can use a manifest file to create a dataset.

" + }, + "DatasetImageStats":{ + "type":"structure", + "members":{ + "Total":{ + "shape":"Integer", + "documentation":"

The total number of images in the dataset.

" + }, + "Labeled":{ + "shape":"Integer", + "documentation":"

The total number of labeled images.

" + }, + "Normal":{ + "shape":"Integer", + "documentation":"

The total number of images labeled as normal.

" + }, + "Anomaly":{ + "shape":"Integer", + "documentation":"

The total number of images labeled as an anomaly.

" + } + }, + "documentation":"

Statistics about the images in a dataset.

" + }, + "DatasetMetadata":{ + "type":"structure", + "members":{ + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The type of the dataset.

" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

The Unix timestamp for the date and time that the dataset was created.

" + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

The status for the dataset.

" + }, + "StatusMessage":{ + "shape":"DatasetStatusMessage", + "documentation":"

The status message for the dataset.

" + } + }, + "documentation":"

Summary information for an Amazon Lookout for Vision dataset.

" + }, + "DatasetMetadataList":{ + "type":"list", + "member":{"shape":"DatasetMetadata"} + }, + "DatasetSource":{ + "type":"structure", + "members":{ + "GroundTruthManifest":{ + "shape":"DatasetGroundTruthManifest", + "documentation":"

Location information for the manifest file.

" + } + }, + "documentation":"

Information about the location of a manifest file that Amazon Lookout for Vision uses to create a dataset.

" + }, + "DatasetStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "CREATE_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_COMPLETE", + "UPDATE_FAILED_ROLLBACK_IN_PROGRESS", + "UPDATE_FAILED_ROLLBACK_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED" + ] + }, + "DatasetStatusMessage":{"type":"string"}, + "DatasetType":{ + "type":"string", + "max":10, + "min":1, + "pattern":"train|test" + }, + "DateTime":{"type":"timestamp"}, + "DeleteDatasetRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the dataset that you want to delete.

", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The type of the dataset to delete. Specify train to delete the training dataset. Specify test to delete the test dataset. To delete the dataset in a single dataset project, specify train.

", + "location":"uri", + "locationName":"datasetType" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to DeleteDataset completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from DeleteDataset. In this case, safely retry your call to DeleteDataset by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to DeleteDataset. An idempotency token is active for 8 hours.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "DeleteDatasetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the model that you want to delete.

", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model that you want to delete.

", + "location":"uri", + "locationName":"modelVersion" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to DeleteModel completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from DeleteModel. In this case, safely retry your call to DeleteModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to DeleteModel. An idempotency token is active for 8 hours.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "DeleteModelResponse":{ + "type":"structure", + "members":{ + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the model that was deleted.

" + } + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project to delete.

", + "location":"uri", + "locationName":"projectName" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to DeleteProject completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from DeleteProject. In this case, safely retry your call to DeleteProject by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to DeleteProject. An idempotency token is active for 8 hours.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "DeleteProjectResponse":{ + "type":"structure", + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

The Amazon Resource Name (ARN) of the project that was deleted.

" + } + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the dataset that you want to describe.

", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The type of the dataset to describe. Specify train to describe the training dataset. Specify test to describe the test dataset. If you have a single dataset project, specify train.

", + "location":"uri", + "locationName":"datasetType" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetDescription":{ + "shape":"DatasetDescription", + "documentation":"

The description of the requested dataset.

" + } + } + }, + "DescribeModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The project that contains the version of a model that you want to describe.

", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model that you want to describe.

", + "location":"uri", + "locationName":"modelVersion" + } + } + }, + "DescribeModelResponse":{ + "type":"structure", + "members":{ + "ModelDescription":{ + "shape":"ModelDescription", + "documentation":"

Contains the description of the model.

" + } + } + }, + "DescribeProjectRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that you want to describe.

", + "location":"uri", + "locationName":"projectName" + } + } + }, + "DescribeProjectResponse":{ + "type":"structure", + "members":{ + "ProjectDescription":{ + "shape":"ProjectDescription", + "documentation":"

The description of the project.

" + } + } + }, + "DetectAnomaliesRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion", + "Body", + "ContentType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the model version that you want to use.

", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model that you want to use.

", + "location":"uri", + "locationName":"modelVersion" + }, + "Body":{ + "shape":"Stream", + "documentation":"

The unencrypted image bytes that you want to analyze.

" + }, + "ContentType":{ + "shape":"ContentType", + "documentation":"

The type of the image passed in Body. Valid values are image/png (PNG format images) and image/jpeg (JPG format images).

", + "location":"header", + "locationName":"content-type" + } + }, + "payload":"Body" + }, + "DetectAnomaliesResponse":{ + "type":"structure", + "members":{ + "DetectAnomalyResult":{ + "shape":"DetectAnomalyResult", + "documentation":"

The results of the DetectAnomalies operation.

" + } + } + }, + "DetectAnomalyResult":{ + "type":"structure", + "members":{ + "Source":{ + "shape":"ImageSource", + "documentation":"

The source of the image that was analyzed. direct means that the image was supplied from the local computer. No other values are supported.

" + }, + "IsAnomalous":{ + "shape":"Boolean", + "documentation":"

True if the image contains an anomaly, otherwise false.

" + }, + "Confidence":{ + "shape":"Float", + "documentation":"

The confidence that Amazon Lookout for Vision has in the accuracy of the prediction.

" + } + }, + "documentation":"

The prediction results from a call to DetectAnomalies.

" + }, + "ExceptionString":{"type":"string"}, + "Float":{"type":"float"}, + "ImageSource":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ImageSourceType", + "documentation":"

The type of the image.

" + } + }, + "documentation":"

The source for an image.

" + }, + "ImageSourceType":{ + "type":"string", + "pattern":"direct" + }, + "InferenceUnits":{ + "type":"integer", + "min":1 + }, + "InputS3Object":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"S3BucketName", + "documentation":"

The Amazon S3 bucket that contains the manifest.

" + }, + "Key":{ + "shape":"S3ObjectKey", + "documentation":"

The name and location of the manifest file within the bucket.

" + }, + "VersionId":{ + "shape":"S3ObjectVersion", + "documentation":"

The version ID of the manifest file object in the bucket.

" + } + }, + "documentation":"

Amazon S3 Location information for an input manifest file.

" + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"}, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

The period of time, in seconds, before the operation can be retried.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

Amazon Lookout for Vision experienced a service issue. Try your call again.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "IsLabeled":{"type":"boolean"}, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$" + }, + "ListDatasetEntriesRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the dataset that you want to list.

", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The type of the dataset that you want to list. Specify train to list the training dataset. Specify test to list the test dataset. If you have a single dataset project, specify train.

", + "location":"uri", + "locationName":"datasetType" + }, + "Labeled":{ + "shape":"IsLabeled", + "documentation":"

Specify true to include labeled entries, otherwise specify false. If you don't specify a value, Lookout for Vision returns all entries.

", + "location":"querystring", + "locationName":"labeled" + }, + "AnomalyClass":{ + "shape":"AnomalyClassFilter", + "documentation":"

Specify normal to include only normal images. Specify anomaly to only include anomalous entries. If you don't specify a value, Amazon Lookout for Vision returns normal and anomalous images.

", + "location":"querystring", + "locationName":"anomalyClass" + }, + "BeforeCreationDate":{ + "shape":"DateTime", + "documentation":"

Only includes entries before the specified date in the response. For example, 2020-06-23T00:00:00.

", + "location":"querystring", + "locationName":"createdBefore" + }, + "AfterCreationDate":{ + "shape":"DateTime", + "documentation":"

Only includes entries after the specified date in the response. For example, 2020-06-23T00:00:00.

", + "location":"querystring", + "locationName":"createdAfter" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the previous response was incomplete (because there is more data to retrieve), Amazon Lookout for Vision returns a pagination token in the response. You can use this pagination token to retrieve the next set of dataset entries.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100.

", + "location":"querystring", + "locationName":"maxResults" + }, + "SourceRefContains":{ + "shape":"QueryString", + "documentation":"

Perform a \"contains\" search on the values of the source-ref key within the dataset. For example a value of \"IMG_17\" returns all JSON Lines where the source-ref key value matches *IMG_17*.

", + "location":"querystring", + "locationName":"sourceRefContains" + } + } + }, + "ListDatasetEntriesResponse":{ + "type":"structure", + "members":{ + "DatasetEntries":{ + "shape":"DatasetEntryList", + "documentation":"

A list of the entries (JSON Lines) within the dataset.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the response is truncated, Amazon Lookout for Vision returns this token that you can use in the subsequent request to retrieve the next set of dataset entries.
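Because ListDatasetEntries is declared in paginators-1.json above, the generated client should also expose a paginator that follows NextToken automatically; a sketch (project name and filter value are placeholders):

import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
import software.amazon.awssdk.services.lookoutvision.model.ListDatasetEntriesRequest;

public class ListTrainingEntries {
    public static void main(String[] args) {
        try (LookoutVisionClient client = LookoutVisionClient.create()) {
            client.listDatasetEntriesPaginator(ListDatasetEntriesRequest.builder()
                            .projectName("my-project")   // placeholder project name
                            .datasetType("train")
                            .sourceRefContains("IMG_17") // "contains" filter on source-ref
                            .build())
                    .stream()                                        // one element per page
                    .flatMap(page -> page.datasetEntries().stream()) // JSON Lines in each page
                    .forEach(System.out::println);
        }
    }
}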

" + } + } + }, + "ListModelsRequest":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the model versions that you want to list.

", + "location":"uri", + "locationName":"projectName" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the previous response was incomplete (because there is more data to retrieve), Amazon Lookout for Vision returns a pagination token in the response. You can use this pagination token to retrieve the next set of models.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListModelsResponse":{ + "type":"structure", + "members":{ + "Models":{ + "shape":"ModelMetadataList", + "documentation":"

A list of model versions in the specified project.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the response is truncated, Amazon Lookout for Vision returns this token that you can use in the subsequent request to retrieve the next set of models.

" + } + } + }, + "ListProjectsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the previous response was incomplete (because there is more data to retrieve), Amazon Lookout for Vision returns a pagination token in the response. You can use this pagination token to retrieve the next set of projects.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of results to return per paginated call. The largest value you can specify is 100. If you specify a value greater than 100, a ValidationException error occurs. The default value is 100.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectsResponse":{ + "type":"structure", + "members":{ + "Projects":{ + "shape":"ProjectMetadataList", + "documentation":"

A list of projects in your AWS account.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the response is truncated, Amazon Lookout for Vision returns this token that you can use in the subsequent request to retrieve the next set of projects.

" + } + } + }, + "ModelArn":{"type":"string"}, + "ModelDescription":{ + "type":"structure", + "members":{ + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the model.

" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

The unix timestamp for the date and time that the model was created.

" + }, + "Description":{ + "shape":"ModelDescriptionMessage", + "documentation":"

The description for the model.

" + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

The status of the model.

" + }, + "StatusMessage":{ + "shape":"ModelStatusMessage", + "documentation":"

The status message for the model.

" + }, + "Performance":{ + "shape":"ModelPerformance", + "documentation":"

Performance metrics for the model. Created during training.

" + }, + "OutputConfig":{ + "shape":"OutputConfig", + "documentation":"

The S3 location where Amazon Lookout for Vision saves model training files.

" + }, + "EvaluationManifest":{ + "shape":"OutputS3Object", + "documentation":"

The S3 location where Amazon Lookout for Vision saves the manifest file that was used to test the trained model and generate the performance scores.

" + }, + "EvaluationResult":{ + "shape":"OutputS3Object", + "documentation":"

The S3 location where Amazon Lookout for Vision saves the performance metrics.

" + }, + "EvaluationEndTimestamp":{ + "shape":"DateTime", + "documentation":"

The unix timestamp for the date and time that the evaluation ended.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The identifier for the AWS Key Management Service (AWS KMS) key that was used to encrypt the model during training.

" + } + }, + "documentation":"

Describes an Amazon Lookout for Vision model.

" + }, + "ModelDescriptionMessage":{ + "type":"string", + "max":500, + "min":1, + "pattern":"[0-9A-Za-z\\.\\-_]*" + }, + "ModelHostingStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "STARTING", + "STOPPED", + "FAILED" + ] + }, + "ModelMetadata":{ + "type":"structure", + "members":{ + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

The unix timestamp for the date and time that the model was created.

" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the model.

" + }, + "Description":{ + "shape":"ModelDescriptionMessage", + "documentation":"

The description for the model.

" + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

The status of the model.

" + }, + "StatusMessage":{ + "shape":"ModelStatusMessage", + "documentation":"

The status message for the model.

" + }, + "Performance":{ + "shape":"ModelPerformance", + "documentation":"

Performance metrics for the model. Created during training.

" + } + }, + "documentation":"

Describes an Amazon Lookout for Vision model.

" + }, + "ModelMetadataList":{ + "type":"list", + "member":{"shape":"ModelMetadata"} + }, + "ModelPerformance":{ + "type":"structure", + "members":{ + "F1Score":{ + "shape":"Float", + "documentation":"

The overall F1 score metric for the trained model.

" + }, + "Recall":{ + "shape":"Float", + "documentation":"

The overall recall metric value for the trained model.

" + }, + "Precision":{ + "shape":"Float", + "documentation":"

The overall precision metric value for the trained model.

" + } + }, + "documentation":"

Information about the evaluation performance of a trained model.

" + }, + "ModelStatus":{ + "type":"string", + "enum":[ + "TRAINING", + "TRAINED", + "TRAINING_FAILED", + "STARTING_HOSTING", + "HOSTED", + "HOSTING_FAILED", + "STOPPING_HOSTING", + "SYSTEM_UPDATING", + "DELETING" + ] + }, + "ModelStatusMessage":{"type":"string"}, + "ModelVersion":{ + "type":"string", + "max":10, + "min":1, + "pattern":"([1-9][0-9]*|latest)" + }, + "OutputConfig":{ + "type":"structure", + "required":["S3Location"], + "members":{ + "S3Location":{ + "shape":"S3Location", + "documentation":"

The S3 location for the output.

" + } + }, + "documentation":"

The S3 location where Amazon Lookout for Vision saves model training files.

" + }, + "OutputS3Object":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"S3BucketName", + "documentation":"

The bucket that contains the training output.

" + }, + "Key":{ + "shape":"S3ObjectKey", + "documentation":"

The location of the training output in the bucket.

" + } + }, + "documentation":"

The S3 location where Amazon Lookout for Vision saves training output.

" + }, + "PageSize":{ + "type":"integer", + "max":100, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":2048, + "pattern":"^[a-zA-Z0-9\\/\\+\\=]{0,2048}$" + }, + "ProjectArn":{"type":"string"}, + "ProjectDescription":{ + "type":"structure", + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

The Amazon Resource Name (ARN) of the project.

" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project.

" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

The unix timestamp for the date and time that the project was created.

" + }, + "Datasets":{ + "shape":"DatasetMetadataList", + "documentation":"

A list of datasets in the project.

" + } + }, + "documentation":"

Describe an Amazon Lookout for Vision project. For more information, see DescribeProject.

" + }, + "ProjectMetadata":{ + "type":"structure", + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

The Amazon Resource Name (ARN) of the project.

" + }, + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project.

" + }, + "CreationTimestamp":{ + "shape":"DateTime", + "documentation":"

The unix timestamp for the date and time that the project was created.

" + } + }, + "documentation":"

Metadata about an Amazon Lookout for Vision project.

" + }, + "ProjectMetadataList":{ + "type":"list", + "member":{"shape":"ProjectMetadata"} + }, + "ProjectName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_\\-]*" + }, + "QueryString":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*\\S.*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{"shape":"ExceptionString"}, + "ResourceId":{ + "shape":"ExceptionString", + "documentation":"

The ID of the resource.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource.

" + } + }, + "documentation":"

The resource could not be found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "PROJECT", + "DATASET", + "MODEL", + "TRIAL" + ] + }, + "RetryAfterSeconds":{"type":"integer"}, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[0-9A-Za-z\\.\\-_]*" + }, + "S3KeyPrefix":{ + "type":"string", + "max":1024, + "pattern":"^([a-zA-Z0-9!_.*'()-][/a-zA-Z0-9!_.*'()-]*)?$" + }, + "S3Location":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3BucketName", + "documentation":"

The S3 bucket that contains the manifest file.

" + }, + "Prefix":{ + "shape":"S3KeyPrefix", + "documentation":"

The path and name of the manifest file within the S3 bucket.

" + } + }, + "documentation":"

Information about the location of a manifest file.

" + }, + "S3ObjectKey":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([a-zA-Z0-9!_.*'()-][/a-zA-Z0-9!_.*'()-]*)?$" + }, + "S3ObjectVersion":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "QuotaCode", + "ServiceCode" + ], + "members":{ + "Message":{"shape":"ExceptionString"}, + "ResourceId":{ + "shape":"ExceptionString", + "documentation":"

The ID of the resource.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource.

" + }, + "QuotaCode":{ + "shape":"ExceptionString", + "documentation":"

The quota code.

" + }, + "ServiceCode":{ + "shape":"ExceptionString", + "documentation":"

The service code.

" + } + }, + "documentation":"

A service quota was exceeded. For more information, see Limits in Amazon Lookout for Vision in the Amazon Lookout for Vision Developer Guide.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "StartModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion", + "MinInferenceUnits" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the model that you want to start.

", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model that you want to start.

", + "location":"uri", + "locationName":"modelVersion" + }, + "MinInferenceUnits":{ + "shape":"InferenceUnits", + "documentation":"

The minimum number of inference units to use. A single inference unit represents 1 hour of processing and can support up to 5 Transactions Per Second (TPS). Use a higher number to increase the TPS throughput of your model. You are charged for the number of inference units that you use.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from StartModel. In this case, safely retry your call to StartModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to StartModel. An idempotency token is active for 8 hours.
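(Illustrative sketch, not part of the service model: assuming the usual AWS SDK for Java v2 code generation for the shapes above, a StartModel call that reuses the same client token on retry might look like the following. The project name and model version are placeholder values.)

    // Hedged sketch: start hosting a Lookout for Vision model, reusing the same
    // client token if the call has to be retried after a network outage.
    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.StartModelRequest;
    import software.amazon.awssdk.services.lookoutvision.model.StartModelResponse;

    public class StartModelExample {
        public static void main(String[] args) {
            // Keep this value if the call must be retried; the same token makes the retry idempotent.
            String clientToken = java.util.UUID.randomUUID().toString();
            try (LookoutVisionClient client = LookoutVisionClient.create()) {
                StartModelRequest request = StartModelRequest.builder()
                        .projectName("my-project")      // placeholder project name
                        .modelVersion("1")
                        .minInferenceUnits(1)
                        .clientToken(clientToken)
                        .build();
                StartModelResponse response = client.startModel(request);
                System.out.println("Hosting status: " + response.statusAsString());
            }
        }
    }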

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "StartModelResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ModelHostingStatus", + "documentation":"

The current running status of the model.

" + } + } + }, + "StopModelRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "ModelVersion" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the model that you want to stop.

", + "location":"uri", + "locationName":"projectName" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model that you want to stop.

", + "location":"uri", + "locationName":"modelVersion" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to StopModel completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from StopModel. In this case, safely retry your call to StopModel by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to StopModel. An idempotency token is active for 8 hours.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "StopModelResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ModelHostingStatus", + "documentation":"

The status of the model.

" + } + } + }, + "Stream":{ + "type":"blob", + "requiresLength":true, + "streaming":true + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"}, + "QuotaCode":{ + "shape":"ExceptionString", + "documentation":"

The quota code.

" + }, + "ServiceCode":{ + "shape":"ExceptionString", + "documentation":"

The service code.

" + }, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

The period of time, in seconds, before the operation can be retried.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

Amazon Lookout for Vision is temporarily unable to process the request. Try your call again.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "UpdateDatasetEntriesRequest":{ + "type":"structure", + "required":[ + "ProjectName", + "DatasetType", + "Changes" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectName", + "documentation":"

The name of the project that contains the dataset that you want to update.

", + "location":"uri", + "locationName":"projectName" + }, + "DatasetType":{ + "shape":"DatasetType", + "documentation":"

The type of the dataset that you want to update. Specify train to update the training dataset. Specify test to update the test dataset. If you have a single dataset project, specify train.

", + "location":"uri", + "locationName":"datasetType" + }, + "Changes":{ + "shape":"DatasetChanges", + "documentation":"

The entries to add to the dataset.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

ClientToken is an idempotency token that ensures a call to UpdateDatasetEntries completes only once. You choose the value to pass. For example, an issue such as a network outage might prevent you from getting a response from UpdateDatasetEntries. In this case, safely retry your call to UpdateDatasetEntries by using the same ClientToken parameter value. An error occurs if the other input parameters are not the same as in the first request. Using a different value for ClientToken is considered a new call to UpdateDatasetEntries. An idempotency token is active for 8 hours.
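(Illustrative sketch, not part of the service model: assuming the Changes blob maps to SdkBytes in the generated AWS SDK for Java v2 client and that dataset entries are passed as JSON Lines, a minimal UpdateDatasetEntries call might look like the following. The project name, bucket, and entry fields are placeholders, not a documented manifest format.)

    // Hedged sketch: add one entry to the training dataset of a project.
    import software.amazon.awssdk.core.SdkBytes;
    import software.amazon.awssdk.services.lookoutvision.LookoutVisionClient;
    import software.amazon.awssdk.services.lookoutvision.model.UpdateDatasetEntriesRequest;
    import software.amazon.awssdk.services.lookoutvision.model.UpdateDatasetEntriesResponse;

    public class UpdateDatasetEntriesExample {
        public static void main(String[] args) {
            // One dataset entry per line (JSON Lines); the fields below are placeholders.
            String changes = "{\"source-ref\":\"s3://my-bucket/images/img-01.jpg\","
                    + "\"anomaly-label\":1}\n";
            try (LookoutVisionClient client = LookoutVisionClient.create()) {
                UpdateDatasetEntriesResponse response = client.updateDatasetEntries(
                        UpdateDatasetEntriesRequest.builder()
                                .projectName("my-project")                 // placeholder
                                .datasetType("train")                      // or "test"
                                .changes(SdkBytes.fromUtf8String(changes))
                                .build());
                System.out.println("Dataset status: " + response.statusAsString());
            }
        }
    }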

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Amzn-Client-Token" + } + } + }, + "UpdateDatasetEntriesResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"DatasetStatus", + "documentation":"

The status of the dataset update.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ExceptionString"} + }, + "documentation":"

An input validation error occurred. For example, a project name contains invalid characters, or a pagination token is invalid.

", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

This is the Amazon Lookout for Vision API Reference. It provides descriptions of actions, data types, common parameters, and common errors.

Amazon Lookout for Vision enables you to find visual defects in industrial products, accurately and at scale. It uses computer vision to identify missing components in an industrial product, damage to vehicles or structures, irregularities in production lines, and even minuscule defects in silicon wafers, as well as any other physical item where quality is important, such as a missing capacitor on printed circuit boards.

" +} diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index c192c7b5dfd0..ab33aed70b31 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index c22259bf915e..8f99bd67a355 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index e627adf142db..6e3d533cea4c 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/macie2/src/main/resources/codegen-resources/service-2.json b/services/macie2/src/main/resources/codegen-resources/service-2.json index 98d93213b360..fdd8ea42364c 100644 --- a/services/macie2/src/main/resources/codegen-resources/service-2.json +++ b/services/macie2/src/main/resources/codegen-resources/service-2.json @@ -698,7 +698,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the status and settings for a classification job.

" + "documentation": "

Retrieves the status and settings for a classification job.

" }, "DescribeOrganizationConfiguration": { "name": "DescribeOrganizationConfiguration", @@ -744,7 +744,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the Amazon Macie configuration settings for an AWS organization.

" + "documentation": "

Retrieves the Amazon Macie configuration settings for an AWS organization.

" }, "DisableMacie": { "name": "DisableMacie", @@ -836,7 +836,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Disables an account as a delegated administrator of Amazon Macie for an AWS organization.

" + "documentation": "

Disables an account as the delegated Amazon Macie administrator account for an AWS organization.

" }, "DisassociateFromMasterAccount": { "name": "DisassociateFromMasterAccount", @@ -1020,7 +1020,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Enables an account as a delegated administrator of Amazon Macie for an AWS organization.

" + "documentation": "

Designates an account as the delegated Amazon Macie administrator account for an AWS organization.

" }, "GetBucketStatistics": { "name": "GetBucketStatistics", @@ -1158,7 +1158,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the criteria and other settings for a custom data identifier.

" + "documentation": "

Retrieves the criteria and other settings for a custom data identifier.

" }, "GetFindingStatistics": { "name": "GetFindingStatistics", @@ -1250,7 +1250,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about one or more findings.

" + "documentation": "

Retrieves the details of one or more findings.

" }, "GetFindingsFilter": { "name": "GetFindingsFilter", @@ -1296,7 +1296,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the criteria and other settings for a findings filter.

" + "documentation": "

Retrieves the criteria and other settings for a findings filter.

" }, "GetInvitationsCount": { "name": "GetInvitationsCount", @@ -1388,7 +1388,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the current status and configuration settings for an Amazon Macie account.

" + "documentation": "

Retrieves the current status and configuration settings for an Amazon Macie account.

" }, "GetMasterAccount": { "name": "GetMasterAccount", @@ -1894,7 +1894,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Retrieves information about the account that's designated as the delegated administrator of Amazon Macie for an AWS organization.

" + "documentation": "

Retrieves information about the delegated Amazon Macie administrator account for an AWS organization.

" }, "ListTagsForResource": { "name": "ListTagsForResource", @@ -2083,7 +2083,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Cancels a classification job.

" + "documentation": "

Changes the status of a classification job.

" }, "UpdateFindingsFilter": { "name": "UpdateFindingsFilter", @@ -2267,7 +2267,7 @@ "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" } ], - "documentation": "

Updates Amazon Macie configuration settings for an AWS organization.

" + "documentation": "

Updates the Amazon Macie configuration settings for an AWS organization.

" } }, "shapes": { @@ -2370,11 +2370,11 @@ "documentation": "

The current status of the account as a delegated administrator of Amazon Macie for the organization.

" } }, - "documentation": "

Provides information about an account that's designated as a delegated administrator of Amazon Macie for an AWS organization.

" + "documentation": "

Provides information about the delegated Amazon Macie administrator account for an AWS organization.

" }, "AdminStatus": { "type": "string", - "documentation": "

The current status of an account as a delegated administrator of Amazon Macie for an AWS organization.

", + "documentation": "

The current status of an account as the delegated Amazon Macie administrator account for an AWS organization.

", "enum": [ "ENABLED", "DISABLING_IN_PROGRESS" @@ -2568,6 +2568,11 @@ "shape": "__long", "locationName": "publiclyWritable", "documentation": "

The total number of buckets that allow the general public to have write access to the bucket.

" + }, + "unknown": { + "shape": "__long", + "locationName": "unknown", + "documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. Macie can't determine whether these buckets are publicly accessible.

" } }, "documentation": "

Provides information about the number of S3 buckets that are publicly accessible based on a combination of permissions settings for each bucket.

" @@ -2578,12 +2583,12 @@ "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of buckets that use an AWS Key Management Service (AWS KMS) customer master key (CMK) to encrypt objects. These buckets use AWS KMS AWS-managed (AWS-KMS) encryption or AWS KMS customer-managed (SSE-KMS) encryption.

" + "documentation": "

The total number of buckets that use an AWS Key Management Service (AWS KMS) customer master key (CMK) to encrypt objects. These buckets use AWS managed AWS KMS (AWS-KMS) encryption or customer managed AWS KMS (SSE-KMS) encryption.

" }, "s3Managed": { "shape": "__long", "locationName": "s3Managed", - "documentation": "

The total number of buckets that use an Amazon S3-managed key to encrypt objects. These buckets use Amazon S3-managed (SSE-S3) encryption.

" + "documentation": "

The total number of buckets that use an Amazon S3 managed key to encrypt objects. These buckets use Amazon S3 managed (SSE-S3) encryption.

" }, "unencrypted": { "shape": "__long", @@ -2609,7 +2614,12 @@ "notShared": { "shape": "__long", "locationName": "notShared", - "documentation": "

The total number of buckets that aren't shared with any other AWS accounts.

" + "documentation": "

The total number of buckets that aren't shared with other AWS accounts.

" + }, + "unknown": { + "shape": "__long", + "locationName": "unknown", + "documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. Macie can't determine whether these buckets are shared with other AWS accounts.

" } }, "documentation": "

Provides information about the number of S3 buckets that are shared with other AWS accounts.

" @@ -2712,12 +2722,17 @@ "classifiableObjectCount": { "shape": "__long", "locationName": "classifiableObjectCount", - "documentation": "

The total number of objects that Amazon Macie can analyze in the bucket. These objects use a supported file or storage format and storage class.

" + "documentation": "

The total number of objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

" }, "classifiableSizeInBytes": { "shape": "__long", "locationName": "classifiableSizeInBytes", - "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported file or storage format and storage class.

" + "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

" + }, + "jobDetails": { + "shape": "JobDetails", + "locationName": "jobDetails", + "documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze data in the bucket, and, if so, the details of the job that ran most recently.

" }, "lastUpdated": { "shape": "__timestampIso8601", @@ -2752,7 +2767,7 @@ "sharedAccess": { "shape": "SharedAccess", "locationName": "sharedAccess", - "documentation": "

Specifies whether the bucket is shared with another AWS account. Possible values are:

  • EXTERNAL - The bucket is shared with an AWS account that isn\u2019t part of the same Amazon Macie organization.

  • INTERNAL - The bucket is shared with an AWS account that's part of the same Amazon Macie organization.

  • NOT_SHARED - The bucket isn't shared with other AWS accounts.

  • UNKNOWN - Amazon Macie wasn't able to evaluate the shared access settings for the bucket.

" + "documentation": "

Specifies whether the bucket is shared with another AWS account. Possible values are:

  • EXTERNAL - The bucket is shared with an AWS account that isn't part of the same Amazon Macie organization.

  • INTERNAL - The bucket is shared with an AWS account that's part of the same Amazon Macie organization.

  • NOT_SHARED - The bucket isn't shared with other AWS accounts.

  • UNKNOWN - Amazon Macie wasn't able to evaluate the shared access settings for the bucket.

" }, "sizeInBytes": { "shape": "__long", @@ -2772,12 +2787,12 @@ "unclassifiableObjectCount": { "shape": "ObjectLevelStatistics", "locationName": "unclassifiableObjectCount", - "documentation": "

The total number of objects that Amazon Macie can't analyze in the bucket. These objects use an unsupported file or storage format or storage class.

" + "documentation": "

The total number of objects that Amazon Macie can't analyze in the bucket. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

" }, "unclassifiableObjectSizeInBytes": { "shape": "ObjectLevelStatistics", "locationName": "unclassifiableObjectSizeInBytes", - "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can't analyze in the bucket. These objects use an unsupported file or storage format or storage class.

" + "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can't analyze in the bucket. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

" }, "versioning": { "shape": "__boolean", @@ -2851,13 +2866,46 @@ }, "documentation": "

Specifies criteria for sorting the results of a query for information about S3 buckets.

" }, + "Cell": { + "type": "structure", + "members": { + "cellReference": { + "shape": "__string", + "locationName": "cellReference", + "documentation": "

The location of the cell, as an absolute cell reference, that contains the data. For example, Sheet2!C5 for cell C5 on Sheet2 in a Microsoft Excel workbook. This value is null for CSV and TSV files.

" + }, + "column": { + "shape": "__long", + "locationName": "column", + "documentation": "

The column number of the column that contains the data. For a Microsoft Excel workbook, this value correlates to the alphabetical character(s) for a column identifier. For example, 1 for column A, 2 for column B, and so on.

" + }, + "columnName": { + "shape": "__string", + "locationName": "columnName", + "documentation": "

The name of the column that contains the data, if available.

" + }, + "row": { + "shape": "__long", + "locationName": "row", + "documentation": "

The row number of the row that contains the data.

" + } + }, + "documentation": "

Specifies the location of an occurrence of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file.

" + }, + "Cells": { + "type": "list", + "documentation": "

Specifies the location of occurrences of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file.

", + "member": { + "shape": "Cell" + } + }, "ClassificationDetails": { "type": "structure", "members": { "detailedResultsLocation": { "shape": "__string", "locationName": "detailedResultsLocation", - "documentation": "

The path to the folder or file (in Amazon S3) that contains the corresponding sensitive data discovery results for the finding. If a finding applies to a large archive or compressed file, this is a path to a folder. Otherwise, this is a path to a file.

" + "documentation": "

The path to the folder or file (in Amazon S3) that contains the corresponding sensitive data discovery result for the finding. If a finding applies to a large archive or compressed file, this value is the path to a folder. Otherwise, this value is the path to a file.

" }, "jobArn": { "shape": "__string", @@ -2891,20 +2939,25 @@ "ClassificationResult": { "type": "structure", "members": { + "additionalOccurrences": { + "shape": "__boolean", + "locationName": "additionalOccurrences", + "documentation": "

Specifies whether Amazon Macie detected additional occurrences of sensitive data in the S3 object. A finding includes location data for a maximum of 15 occurrences of sensitive data.

This value can help you determine whether to investigate additional occurrences of sensitive data in an object. You can do this by referring to the corresponding sensitive data discovery result for the finding (ClassificationDetails.detailedResultsLocation).

" + }, "customDataIdentifiers": { "shape": "CustomDataIdentifiers", "locationName": "customDataIdentifiers", - "documentation": "

The number of occurrences of the data that produced the finding, and the custom data identifiers that detected the data.

" + "documentation": "

The custom data identifiers that detected the sensitive data and the number of occurrences of the data that they detected.

" }, "mimeType": { "shape": "__string", "locationName": "mimeType", - "documentation": "

The type of content, expressed as a MIME type, that the finding applies to. For example, application/gzip, for a GNU Gzip compressed archive file, or application/pdf, for an Adobe PDF file.

" + "documentation": "

The type of content, as a MIME type, that the finding applies to. For example, application/gzip, for a GNU Gzip compressed archive file, or application/pdf, for an Adobe Portable Document Format file.

" }, "sensitiveData": { "shape": "SensitiveData", "locationName": "sensitiveData", - "documentation": "

The category and number of occurrences of the sensitive data that produced the finding.

" + "documentation": "

The category, types, and number of occurrences of the sensitive data that produced the finding.

" }, "sizeClassified": { "shape": "__long", @@ -2917,7 +2970,7 @@ "documentation": "

The status of the finding.

" } }, - "documentation": "

Provides detailed information about a sensitive data finding, including the types and number of occurrences of the sensitive data that was found.

" + "documentation": "

Provides the details of a sensitive data finding, including the types, number of occurrences, and locations of the sensitive data that was detected.

" }, "ClassificationResultStatus": { "type": "structure", @@ -2925,7 +2978,7 @@ "code": { "shape": "__string", "locationName": "code", - "documentation": "

The status of the finding. Possible values are:

  • COMPLETE - Amazon Macie successfully completed its analysis of the object that the finding applies to.

  • PARTIAL - Macie was able to analyze only a subset of the data in the object that the finding applies to. For example, the object is a compressed or archive file that contains files in an unsupported format.

  • SKIPPED - Macie wasn't able to analyze the object that the finding applies to. For example, the object is a malformed file or a file that's in an unsupported format.

" + "documentation": "

The status of the finding. Possible values are:

  • COMPLETE - Amazon Macie successfully completed its analysis of the object that the finding applies to.

  • PARTIAL - Macie analyzed only a subset of the data in the object that the finding applies to. For example, the object is an archive file that contains files in an unsupported format.

  • SKIPPED - Macie wasn't able to analyze the object that the finding applies to. For example, the object is a malformed file or a file that uses an unsupported format.

" }, "reason": { "shape": "__string", @@ -2997,7 +3050,7 @@ "scheduleFrequency": { "shape": "JobScheduleFrequency", "locationName": "scheduleFrequency", - "documentation": "

The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value of the jobType property to ONE_TIME.

" + "documentation": "

The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value for the jobType property to ONE_TIME.

" }, "tags": { "shape": "TagMap", @@ -3237,6 +3290,11 @@ "locationName": "eq", "documentation": "

An equal to condition to apply to a specified property value for findings.

" }, + "eqExactMatch": { + "shape": "__listOf__string", + "locationName": "eqExactMatch", + "documentation": "

A condition that requires an array field to exactly match the specified property values. You can use this operator with the following properties: customDataIdentifiers.detections.arn, customDataIdentifiers.detections.name, resourcesAffected.s3Bucket.tags.key, resourcesAffected.s3Bucket.tags.value, resourcesAffected.s3Object.tags.key, resourcesAffected.s3Object.tags.value, sensitiveData.category, and sensitiveData.detections.type.
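(Illustrative sketch, not part of the service model: assuming the criterion map above is generated as CriterionAdditionalProperties in the AWS SDK for Java v2 Macie2 client, an eqExactMatch filter that lists only the findings produced by one custom data identifier might look like the following. The identifier name is a placeholder; the property key is one of the values listed above.)

    // Hedged sketch: list finding IDs whose custom data identifier name exactly
    // matches the given value, using the eqExactMatch operator.
    import java.util.Collections;
    import software.amazon.awssdk.services.macie2.Macie2Client;
    import software.amazon.awssdk.services.macie2.model.CriterionAdditionalProperties;
    import software.amazon.awssdk.services.macie2.model.FindingCriteria;
    import software.amazon.awssdk.services.macie2.model.ListFindingsRequest;

    public class ListFindingsByCustomIdentifier {
        public static void main(String[] args) {
            CriterionAdditionalProperties exactName = CriterionAdditionalProperties.builder()
                    .eqExactMatch("my-identifier")   // placeholder custom data identifier name
                    .build();
            FindingCriteria criteria = FindingCriteria.builder()
                    .criterion(Collections.singletonMap(
                            "customDataIdentifiers.detections.name", exactName))
                    .build();
            try (Macie2Client macie = Macie2Client.create()) {
                macie.listFindings(ListFindingsRequest.builder()
                                .findingCriteria(criteria)
                                .build())
                        .findingIds()
                        .forEach(System.out::println);
            }
        }
    }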

" + }, "gt": { "shape": "__long", "locationName": "gt", @@ -3317,7 +3375,7 @@ "documentation": "

The total number of occurrences of the data that was detected by the custom data identifiers and produced the finding.

" } }, - "documentation": "

Provides information about the number of occurrences of the data that produced a sensitive data finding, and the custom data identifiers that detected the data for the finding.

" + "documentation": "

Provides information about custom data identifiers that produced a sensitive data finding, and the number of occurrences of the data that they detected for the finding.

" }, "CustomDetection": { "type": "structure", @@ -3330,15 +3388,20 @@ "count": { "shape": "__long", "locationName": "count", - "documentation": "

The total number of occurrences of the data that the custom data identifier detected for the finding.

" + "documentation": "

The total number of occurrences of the sensitive data that the custom data identifier detected.

" }, "name": { "shape": "__string", "locationName": "name", "documentation": "

The name of the custom data identifier.

" + }, + "occurrences": { + "shape": "Occurrences", + "locationName": "occurrences", + "documentation": "

The location of 1-15 occurrences of the sensitive data that the custom data identifier detected. A finding includes location data for a maximum of 15 occurrences of sensitive data.

" } }, - "documentation": "

Provides information about a custom data identifier that produced a sensitive data finding, and the number of occurrences of the data that it detected for the finding.

" + "documentation": "

Provides information about a custom data identifier that produced a sensitive data finding, and the sensitive data that it detected for the finding.

" }, "CustomDetections": { "type": "list", @@ -3393,15 +3456,20 @@ "count": { "shape": "__long", "locationName": "count", - "documentation": "

The total number of occurrences of the type of data that was detected.

" + "documentation": "

The total number of occurrences of the type of sensitive data that was detected.

" + }, + "occurrences": { + "shape": "Occurrences", + "locationName": "occurrences", + "documentation": "

The location of 1-15 occurrences of the sensitive data that was detected. A finding includes location data for a maximum of 15 occurrences of sensitive data.

" }, "type": { "shape": "__string", "locationName": "type", - "documentation": "

The type of data that was detected. For example, AWS_CREDENTIALS, PHONE_NUMBER, or ADDRESS.

" + "documentation": "

The type of sensitive data that was detected. For example, AWS_CREDENTIALS, PHONE_NUMBER, or ADDRESS.

" } }, - "documentation": "

Provides information about sensitive data that was detected by managed data identifiers and produced a sensitive data finding.

" + "documentation": "

Provides information about a type of sensitive data that was detected by managed data identifiers and produced a sensitive data finding.

" }, "DefaultDetections": { "type": "list", @@ -3583,17 +3651,22 @@ "jobStatus": { "shape": "JobStatus", "locationName": "jobStatus", - "documentation": "

The current status of the job. Possible values are:

  • CANCELLED - You cancelled the job. A job might also be cancelled if ownership of an S3 bucket changed while the job was running, and that change affected the job's access to the bucket.

  • COMPLETE - Amazon Macie finished processing all the data specified for the job.

  • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to jobs that occur only once.

  • PAUSED - Amazon Macie started the job, but completion of the job would exceed one or more quotas for your account.

  • RUNNING - The job is in progress.

" + "documentation": "

The current status of the job. Possible values are:

  • CANCELLED - You cancelled the job, or you paused the job while it had a status of RUNNING and you didn't resume it within 30 days of pausing it.

  • COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.

  • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.

  • PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.

  • RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.

  • USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume the job within 30 days of pausing it, the job expires and is cancelled. To check the job's expiration date, refer to the UserPausedDetails.jobExpiresAt property.

" }, "jobType": { "shape": "JobType", "locationName": "jobType", - "documentation": "

The schedule for running the job. Possible values are:

  • ONE_TIME - The job ran or will run only once.

  • SCHEDULED - The job runs on a daily, weekly, or monthly basis. The scheduleFrequency property indicates the recurrence pattern for the job.

" + "documentation": "

The schedule for running the job. Possible values are:

  • ONE_TIME - The job runs only once.

  • SCHEDULED - The job runs on a daily, weekly, or monthly basis. The scheduleFrequency property indicates the recurrence pattern for the job.

" + }, + "lastRunErrorStatus": { + "shape": "LastRunErrorStatus", + "locationName": "lastRunErrorStatus", + "documentation": "

Specifies whether any account- or bucket-level access errors occurred when the job ran. For a recurring job, this value indicates the error status of the job's most recent run.

" }, "lastRunTime": { "shape": "__timestampIso8601", "locationName": "lastRunTime", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job last ran.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job started. If the job is a recurring job, this value indicates when the most recent run started.

" }, "name": { "shape": "__string", @@ -3618,12 +3691,17 @@ "statistics": { "shape": "Statistics", "locationName": "statistics", - "documentation": "

The number of times that the job has run and processing statistics for the job's most recent run.

" + "documentation": "

The number of times that the job has run and processing statistics for the job's current run.

" }, "tags": { "shape": "TagMap", "locationName": "tags", - "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the classification job.

" + "documentation": "

A map of key-value pairs that specifies which tags (keys and values) are associated with the classification job.

" + }, + "userPausedDetails": { + "shape": "UserPausedDetails", + "locationName": "userPausedDetails", + "documentation": "

If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job will expire and be cancelled if it isn't resumed. This value is present only if the value for jobStatus is USER_PAUSED.

" } } }, @@ -3642,7 +3720,7 @@ "maxAccountLimitReached": { "shape": "__boolean", "locationName": "maxAccountLimitReached", - "documentation": "

Specifies whether the maximum number of Amazon Macie member accounts are already associated with the AWS organization.

" + "documentation": "

Specifies whether the maximum number of Amazon Macie member accounts are part of the AWS organization.

" } } }, @@ -3753,7 +3831,7 @@ "adminAccountId": { "shape": "__string", "locationName": "adminAccountId", - "documentation": "

The AWS account ID for the account.

" + "documentation": "

The AWS account ID for the account to designate as the delegated Amazon Macie administrator account for the organization.

" }, "clientToken": { "shape": "__string", @@ -3772,7 +3850,7 @@ }, "EncryptionType": { "type": "string", - "documentation": "

The type of server-side encryption that's used to encrypt objects in the S3 bucket. Valid values are:

", + "documentation": "

The type of server-side encryption that's used to encrypt an S3 object or objects in an S3 bucket. Valid values are:

", "enum": [ "NONE", "AES256", @@ -3845,7 +3923,7 @@ "count": { "shape": "__long", "locationName": "count", - "documentation": "

The total number of occurrences of this finding.

" + "documentation": "

The total number of occurrences of the finding. For sensitive data findings, this value is always 1. All sensitive data findings are considered new (unique) because they derive from individual classification jobs.

" }, "createdAt": { "shape": "__timestampIso8601", @@ -3895,7 +3973,7 @@ "severity": { "shape": "Severity", "locationName": "severity", - "documentation": "

The severity of the finding.

" + "documentation": "

The severity level and score for the finding.

" }, "title": { "shape": "__string", @@ -3910,10 +3988,10 @@ "updatedAt": { "shape": "__timestampIso8601", "locationName": "updatedAt", - "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the finding was last updated. For sensitive data findings, this value is the same as the value for the createdAt property. Sensitive data findings aren't updated.

" + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the finding was last updated. For sensitive data findings, this value is the same as the value for the createdAt property. All sensitive data findings are considered new (unique) because they derive from individual classification jobs.

" } }, - "documentation": "

Provides information about a finding.

" + "documentation": "

Provides the details of a finding.

" }, "FindingAction": { "type": "structure", @@ -4013,7 +4091,7 @@ }, "FindingType": { "type": "string", - "documentation": "

The type of finding. Valid values are:

", + "documentation": "

The type of finding. For details about each type, see Types of Amazon Macie findings in the Amazon Macie User Guide. Valid values are:

", "enum": [ "SensitiveData:S3Object/Multiple", "SensitiveData:S3Object/Financial", @@ -4102,12 +4180,12 @@ "classifiableObjectCount": { "shape": "__long", "locationName": "classifiableObjectCount", - "documentation": "

The total number of objects that Amazon Macie can analyze in the buckets. These objects use a supported file or storage format and storage class.

" + "documentation": "

The total number of objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format.

" }, "classifiableSizeInBytes": { "shape": "__long", "locationName": "classifiableSizeInBytes", - "documentation": "

The total storage size, in bytes, of all the objects that Amazon Macie can analyze in the buckets. These objects use a supported file or storage format and storage class.

" + "documentation": "

The total storage size, in bytes, of all the objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format.

" }, "lastUpdated": { "shape": "__timestampIso8601", @@ -4132,12 +4210,12 @@ "unclassifiableObjectCount": { "shape": "ObjectLevelStatistics", "locationName": "unclassifiableObjectCount", - "documentation": "

The total number of objects that Amazon Macie can't analyze in the buckets. These objects use an unsupported file or storage format or storage class.

" + "documentation": "

The total number of objects that Amazon Macie can't analyze in the buckets. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

" }, "unclassifiableObjectSizeInBytes": { "shape": "ObjectLevelStatistics", "locationName": "unclassifiableObjectSizeInBytes", - "documentation": "

The total storage size, in bytes, of all the objects that Amazon Macie can't analyze in the buckets. These objects use an unsupported file or storage format or storage class.

" + "documentation": "

The total storage size, in bytes, of all the objects that Amazon Macie can't analyze in the buckets. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

" } } }, @@ -4240,7 +4318,7 @@ "groupBy": { "shape": "GroupBy", "locationName": "groupBy", - "documentation": "

The finding property to use to group the query results. Valid values are:

  • classificationDetails.jobId - The unique identifier for the classification job that produced the finding.

  • resourcesAffected.s3Bucket.name - The name of the S3 bucket that the finding applies to.

  • severity.description - The severity of the finding, such as High or Medium.

  • type - The type of finding, such as Policy:IAMUser/S3BucketPublic and SensitiveData:S3Object/Personal.

" + "documentation": "

The finding property to use to group the query results. Valid values are:

  • classificationDetails.jobId - The unique identifier for the classification job that produced the finding.

  • resourcesAffected.s3Bucket.name - The name of the S3 bucket that the finding applies to.

  • severity.description - The severity level of the finding, such as High or Medium.

  • type - The type of finding, such as Policy:IAMUser/S3BucketPublic and SensitiveData:S3Object/Personal.

" }, "size": { "shape": "__integer", @@ -4332,7 +4410,7 @@ "findingIds": { "shape": "__listOf__string", "locationName": "findingIds", - "documentation": "

An array of strings that lists the unique identifiers for the findings to retrieve information about.

" + "documentation": "

An array of strings that lists the unique identifiers for the findings to retrieve.

" }, "sortCriteria": { "shape": "SortCriteria", @@ -4721,6 +4799,22 @@ }, "documentation": "

Provides information about the registered owner of an IP address.

" }, + "IsDefinedInJob": { + "type": "string", + "enum": [ + "TRUE", + "FALSE", + "UNKNOWN" + ] + }, + "IsMonitoredByJob": { + "type": "string", + "enum": [ + "TRUE", + "FALSE", + "UNKNOWN" + ] + }, "JobComparator": { "type": "string", "documentation": "

The operator to use in a condition. Valid values are:

", @@ -4734,6 +4828,32 @@ "CONTAINS" ] }, + "JobDetails": { + "type": "structure", + "members": { + "isDefinedInJob": { + "shape": "IsDefinedInJob", + "locationName": "isDefinedInJob", + "documentation": "

Specifies whether any one-time or recurring jobs are configured to analyze data in the bucket. Possible values are:

  • TRUE - One or more jobs is configured to analyze data in the bucket, and at least one of those jobs has a status other than CANCELLED.

  • FALSE - No jobs are configured to analyze data in the bucket, or all the jobs that are configured to analyze data in the bucket have a status of CANCELLED.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

" + }, + "isMonitoredByJob": { + "shape": "IsMonitoredByJob", + "locationName": "isMonitoredByJob", + "documentation": "

Specifies whether any recurring jobs are configured to analyze data in the bucket. Possible values are:

  • TRUE - One or more recurring jobs is configured to analyze data in the bucket, and at least one of those jobs has a status other than CANCELLED.

  • FALSE - No recurring jobs are configured to analyze data in the bucket, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

" + }, + "lastJobId": { + "shape": "__string", + "locationName": "lastJobId", + "documentation": "

The unique identifier for the job that ran most recently (either the latest run of a recurring job or the only run of a one-time job) and is configured to analyze data in the bucket.

This value is null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

" + }, + "lastJobRunTime": { + "shape": "__timestampIso8601", + "locationName": "lastJobRunTime", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job (lastJobId) started. If the job is a recurring job, this value indicates when the most recent run started.

This value is null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

" + } + }, + "documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze data in an S3 bucket, and, if so, the details of the job that ran most recently.

" + }, "JobScheduleFrequency": { "type": "structure", "members": { @@ -4766,7 +4886,7 @@ "tagScopeTerm": { "shape": "TagScopeTerm", "locationName": "tagScopeTerm", - "documentation": "

A tag-based condition that defines the operator and a tag key or tag keys and values for including or excluding an object from the job.

" + "documentation": "

A tag-based condition that defines the operator and tag keys or tag key and value pairs for including or excluding an object from the job.

" } }, "documentation": "

Specifies a property- or tag-based condition that defines criteria for including or excluding objects from a classification job.

" @@ -4780,17 +4900,18 @@ "documentation": "

An array of conditions, one for each condition that determines which objects to include or exclude from the job.

" } }, - "documentation": "

Specifies one or more property- and tag-based conditions that define criteria for including or excluding objects from a classification job.

" + "documentation": "

Specifies one or more property- and tag-based conditions that define criteria for including or excluding objects from a classification job. If you specify more than one condition, Amazon Macie uses an AND operator to join the conditions.

" }, "JobStatus": { "type": "string", - "documentation": "

The current status of a classification job. Possible values are:

", + "documentation": "

The status of a classification job. Possible values are:

", "enum": [ "RUNNING", "PAUSED", "CANCELLED", "COMPLETE", - "IDLE" + "IDLE", + "USER_PAUSED" ] }, "JobSummary": { @@ -4814,17 +4935,27 @@ "jobStatus": { "shape": "JobStatus", "locationName": "jobStatus", - "documentation": "

The current status of the job. Possible values are:

  • CANCELLED - You cancelled the job. A job might also be cancelled if ownership of an S3 bucket changed while the job was running, and that change affected the job's access to the bucket.

  • COMPLETE - Amazon Macie finished processing all the data specified for the job.

  • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to jobs that occur only once.

  • PAUSED - Amazon Macie started the job, but completion of the job would exceed one or more quotas for your account.

  • RUNNING - The job is in progress.

" + "documentation": "

The current status of the job. Possible values are:

  • CANCELLED - You cancelled the job, or you paused the job while it had a status of RUNNING and you didn't resume it within 30 days of pausing it.

  • COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.

  • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.

  • PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.

  • RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.

  • USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume the job within 30 days of pausing it, the job expires and is cancelled. To check the job's expiration date, refer to the UserPausedDetails.jobExpiresAt property.

" }, "jobType": { "shape": "JobType", "locationName": "jobType", - "documentation": "

The schedule for running the job. Possible values are:

  • ONE_TIME - The job ran or will run only once.

  • SCHEDULED - The job runs on a daily, weekly, or monthly basis.

" + "documentation": "

The schedule for running the job. Possible values are:

  • ONE_TIME - The job runs only once.

  • SCHEDULED - The job runs on a daily, weekly, or monthly basis.

" + }, + "lastRunErrorStatus": { + "shape": "LastRunErrorStatus", + "locationName": "lastRunErrorStatus", + "documentation": "

Specifies whether any account- or bucket-level access errors occurred when the job ran. For a recurring job, this value indicates the error status of the job's most recent run.

" }, "name": { "shape": "__string", "locationName": "name", "documentation": "

The custom name of the job.

" + }, + "userPausedDetails": { + "shape": "UserPausedDetails", + "locationName": "userPausedDetails", + "documentation": "

If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job will expire and be cancelled if it isn't resumed. This value is present only if the value for jobStatus is USER_PAUSED.

" } }, "documentation": "

Provides information about a classification job, including the current status of the job.

" @@ -4848,7 +4979,7 @@ "value": { "shape": "__string", "locationName": "value", - "documentation": "

One part of a key-value pair that comprises a tag. A tag value acts as a descriptor for a tag key. A tag value can be empty or null.

" + "documentation": "

One part of a key-value pair that comprises a tag. A tag value acts as a descriptor for a tag key. A tag value can be an empty string.

" } }, "documentation": "

Provides information about the tags that are associated with an S3 bucket or object. Each tag consists of a required tag key and an associated tag value.

" @@ -4860,6 +4991,25 @@ "shape": "KeyValuePair" } }, + "LastRunErrorStatus": { + "type": "structure", + "members": { + "code": { + "shape": "LastRunErrorStatusCode", + "locationName": "code", + "documentation": "

Specifies whether any account- or bucket-level access errors occurred when the job ran. For a recurring job, this value indicates the error status of the job's most recent run. Possible values are:

  • ERROR - One or more errors occurred. Amazon Macie didn't process all the data specified for the job.

  • NONE - No errors occurred. Macie processed all the data specified for the job.

" + } + }, + "documentation": "

Specifies whether any account- or bucket-level access errors occurred when a classification job ran. For example, the job is configured to analyze data for a member account that was suspended, or the job is configured to analyze an S3 bucket that Amazon Macie isn't allowed to access.

" + }, + "LastRunErrorStatusCode": { + "type": "string", + "documentation": "

Specifies whether any account- or bucket-level access errors occurred during the run of a one-time classification job or the most recent run of a recurring classification job. Possible values are:

", + "enum": [ + "NONE", + "ERROR" + ] + }, "ListClassificationJobsRequest": { "type": "structure", "members": { @@ -5168,7 +5318,7 @@ "adminAccounts": { "shape": "__listOfAdminAccount", "locationName": "adminAccounts", - "documentation": "

An array of objects, one for each account that's designated as a delegated administrator of Amazon Macie for the AWS organization. Of those accounts, only one can have a status of ENABLED.

" + "documentation": "

An array of objects, one for each delegated Amazon Macie administrator account for the organization. Only one of these accounts can have a status of ENABLED.

" }, "nextToken": { "shape": "__string", @@ -5266,7 +5416,7 @@ "dayOfMonth": { "shape": "__integer", "locationName": "dayOfMonth", - "documentation": "

The numeric day of the month when Amazon Macie runs the job. This value can be an integer from 1 through 30.

" + "documentation": "

The numeric day of the month when Amazon Macie runs the job. This value can be an integer from 1 through 31.

If this value exceeds the number of days in a certain month, Macie runs the job on the last day of that month. For example, if this value is 31 and a month has only 30 days, Macie runs the job on day 30 of that month.

" } }, "documentation": "

Specifies a monthly recurrence pattern for running a classification job.

" @@ -5282,12 +5432,12 @@ "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of objects that are encrypted using an AWS Key Management Service (AWS KMS) customer master key (CMK). The objects use AWS KMS AWS-managed (AWS-KMS) encryption or AWS KMS customer-managed (SSE-KMS) encryption.

" + "documentation": "

The total number of objects that are encrypted using an AWS Key Management Service (AWS KMS) customer master key (CMK). The objects use AWS managed AWS KMS (AWS-KMS) encryption or customer managed AWS KMS (SSE-KMS) encryption.

" }, "s3Managed": { "shape": "__long", "locationName": "s3Managed", - "documentation": "

The total number of objects that are encrypted using an Amazon S3-managed key. The objects use Amazon S3-managed (SSE-S3) encryption.

" + "documentation": "

The total number of objects that are encrypted using an Amazon S3 managed key. The objects use Amazon S3 managed (SSE-S3) encryption.

" }, "unencrypted": { "shape": "__long", @@ -5303,7 +5453,7 @@ "fileType": { "shape": "__long", "locationName": "fileType", - "documentation": "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported file or storage format.

" + "documentation": "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects don't have a file name extension for a supported file or storage format.

" }, "storageClass": { "shape": "__long", @@ -5313,11 +5463,42 @@ "total": { "shape": "__long", "locationName": "total", - "documentation": "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported file or storage format or storage class.

" + "documentation": "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class or don't have a file name extension for a supported file or storage format.

" } }, "documentation": "

Provides information about the total storage size (in bytes) or number of objects that Amazon Macie can't analyze in one or more S3 buckets. In a BucketMetadata object, this data is for a specific bucket. In a GetBucketStatisticsResponse object, this data is aggregated for all the buckets in the query results.

" }, + "Occurrences": { + "type": "structure", + "members": { + "cells": { + "shape": "Cells", + "locationName": "cells", + "documentation": "

An array of objects, one for each occurrence of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file. Each object specifies the cell or field that contains the data. This value is null for all other types of files.

" + }, + "lineRanges": { + "shape": "Ranges", + "locationName": "lineRanges", + "documentation": "

An array of objects, one for each occurrence of sensitive data in a Microsoft Word document or non-binary text file, such as an HTML, JSON, TXT, or XML file. Each object specifies the line that contains the data, and the position of the data on that line.

This value is often null for file types that are supported by Cell, Page, or Record objects. Exceptions are the locations of: data in unstructured sections of an otherwise structured file, such as a comment in a file; and, data in a malformed file that Amazon Macie analyzes as plain text.

" + }, + "offsetRanges": { + "shape": "Ranges", + "locationName": "offsetRanges", + "documentation": "

An array of objects, one for each occurrence of sensitive data in a binary text file. Each object specifies the position of the data relative to the beginning of the file.

This value is typically null. For binary text files, Amazon Macie adds location data to a lineRanges.Range or Page object, depending on the file type.

" + }, + "pages": { + "shape": "Pages", + "locationName": "pages", + "documentation": "

An array of objects, one for each occurrence of sensitive data in an Adobe Portable Document Format file. Each object specifies the page that contains the data, and the position of the data on that page. This value is null for all other types of files.

" + }, + "records": { + "shape": "Records", + "locationName": "records", + "documentation": "

An array of objects, one for each occurrence of sensitive data in an Apache Avro object container or Apache Parquet file. Each object specifies the record index and the path to the field in the record that contains the data. This value is null for all other types of files.

" + } + }, + "documentation": "

Provides the location of 1-15 occurrences of sensitive data that was detected by managed data identifiers or a custom data identifier and produced a sensitive data finding.
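A minimal sketch, not part of this model file, of how a consumer of the generated Java client might walk these location fields, assuming the standard SDK v2 codegen naming for the shapes defined here (Occurrences, Page, Range, Record with fluent accessors):

```java
// Hedged illustration: prints sensitive-data locations from a Macie Occurrences value.
import software.amazon.awssdk.services.macie2.model.Occurrences;
import software.amazon.awssdk.services.macie2.model.Page;
import software.amazon.awssdk.services.macie2.model.Range;
import software.amazon.awssdk.services.macie2.model.Record;

public class OccurrenceLocations {
    static void printLocations(Occurrences occurrences) {
        if (occurrences == null) {
            return;
        }
        // Adobe PDF files: the page number that contains the data.
        for (Page page : occurrences.pages()) {
            System.out.println("PDF page " + page.pageNumber());
        }
        // Word documents and non-binary text files: line ranges within the file.
        for (Range range : occurrences.lineRanges()) {
            System.out.println("Lines " + range.start() + "-" + range.end());
        }
        // Avro object containers and Parquet files: record index plus JSONPath to the field.
        for (Record record : occurrences.records()) {
            System.out.println("Record " + record.recordIndex() + " at " + record.jsonPath());
        }
    }
}
```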

" + }, "OrderBy": { "type": "string", "enum": [ @@ -5325,6 +5506,34 @@ "DESC" ] }, + "Page": { + "type": "structure", + "members": { + "lineRange": { + "shape": "Range", + "locationName": "lineRange", + "documentation": "

The line that contains the data, and the position of the data on that line.

" + }, + "offsetRange": { + "shape": "Range", + "locationName": "offsetRange", + "documentation": "

The position of the data on the page, relative to the beginning of the page.

" + }, + "pageNumber": { + "shape": "__long", + "locationName": "pageNumber", + "documentation": "

The page number of the page that contains the data.

" + } + }, + "documentation": "

Specifies the location of an occurrence of sensitive data in an Adobe Portable Document Format file.

" + }, + "Pages": { + "type": "list", + "documentation": "

Specifies the location of occurrences of sensitive data in an Adobe Portable Document Format file.

", + "member": { + "shape": "Page" + } + }, "PolicyDetails": { "type": "structure", "members": { @@ -5364,6 +5573,57 @@ } } }, + "Range": { + "type": "structure", + "members": { + "end": { + "shape": "__long", + "locationName": "end", + "documentation": "

Possible values are:

  • In an Occurrences.lineRanges array, the number of lines from the beginning of the file to the end of the sensitive data.

  • In an Occurrences.offsetRanges array, the number of characters from the beginning of the file to the end of the sensitive data.

  • In a Page object, the number of lines (lineRange) or characters (offsetRange) from the beginning of the page to the end of the sensitive data.

" + }, + "start": { + "shape": "__long", + "locationName": "start", + "documentation": "

Possible values are:

  • In an Occurrences.lineRanges array, the number of lines from the beginning of the file to the beginning of the sensitive data.

  • In an Occurrences.offsetRanges array, the number of characters from the beginning of the file to the beginning of the sensitive data.

  • In a Page object, the number of lines (lineRange) or characters (offsetRange) from the beginning of the page to the beginning of the sensitive data.

" + }, + "startColumn": { + "shape": "__long", + "locationName": "startColumn", + "documentation": "

The column number for the column that contains the data, if the file contains structured data.

" + } + }, + "documentation": "

Provides details about the location of an occurrence of sensitive data in an Adobe Portable Document Format file, Microsoft Word document, or non-binary text file.

" + }, + "Ranges": { + "type": "list", + "documentation": "

Provides details about the location of occurrences of sensitive data in an Adobe Portable Document Format file, Microsoft Word document, or non-binary text file.

", + "member": { + "shape": "Range" + } + }, + "Record": { + "type": "structure", + "members": { + "jsonPath": { + "shape": "__string", + "locationName": "jsonPath", + "documentation": "

The path, as a JSONPath expression, to the field in the record that contains the data.

If the name of an element exceeds 20 characters, Amazon Macie truncates the name by removing characters from the beginning of the name. If the resulting full path exceeds 250 characters, Macie also truncates the path, starting with the first element in the path, until the path contains 250 or fewer characters.

" + }, + "recordIndex": { + "shape": "__long", + "locationName": "recordIndex", + "documentation": "

The record index, starting from 0, for the record that contains the data.

" + } + }, + "documentation": "

Specifies the location of an occurrence of sensitive data in an Apache Avro object container or Apache Parquet file.

" + }, + "Records": { + "type": "list", + "documentation": "

Specifies the location of occurrences of sensitive data in an Apache Parquet file.

", + "member": { + "shape": "Record" + } + }, "RelationshipStatus": { "type": "string", "documentation": "

The current status of the relationship between an account and an associated Amazon Macie master account (inviter account). Possible values are:

", @@ -5448,7 +5708,7 @@ "defaultServerSideEncryption": { "shape": "ServerSideEncryption", "locationName": "defaultServerSideEncryption", - "documentation": "

The server-side encryption settings for the bucket.

" + "documentation": "

The type of server-side encryption that's used by default to encrypt objects in the bucket.

" }, "name": { "shape": "__string", @@ -5479,7 +5739,7 @@ "accountId": { "shape": "__string", "locationName": "accountId", - "documentation": "

The unique identifier for the AWS account that owns one or more of the buckets. If specified, the job analyzes objects in all the buckets that are owned by the account and meet other conditions specified for the job.

" + "documentation": "

The unique identifier for the AWS account that owns the buckets. If you specify this value and don't specify a value for the buckets array, the job analyzes objects in all the buckets that are owned by the account and meet other conditions specified for the job.

" }, "buckets": { "shape": "__listOf__string", @@ -5487,7 +5747,7 @@ "documentation": "

An array that lists the names of the buckets.

" } }, - "documentation": "

Specifies which S3 buckets contain the objects that a classification job analyzes.

" + "documentation": "

Specifies which AWS account owns the S3 buckets that a classification job analyzes, and the buckets to analyze for the account.

" }, "S3BucketOwner": { "type": "structure", @@ -5536,7 +5796,7 @@ "bucketDefinitions": { "shape": "__listOfS3BucketDefinitionForJob", "locationName": "bucketDefinitions", - "documentation": "

An array of objects, one for each bucket that contains objects to analyze.

" + "documentation": "

An array of objects, one for each AWS account that owns buckets to analyze. Each object specifies the account ID for an account and one or more buckets to analyze for the account.
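A hedged sketch of building this structure with the generated Macie2 model classes (standard SDK v2 codegen naming assumed; the account ID and bucket names are placeholders):

```java
// One S3BucketDefinitionForJob per AWS account, listing the buckets to analyze for that account.
import software.amazon.awssdk.services.macie2.model.S3BucketDefinitionForJob;
import software.amazon.awssdk.services.macie2.model.S3JobDefinition;

public class BucketDefinitionExample {
    public static void main(String[] args) {
        S3BucketDefinitionForJob perAccount = S3BucketDefinitionForJob.builder()
                .accountId("111122223333")           // account that owns the buckets
                .buckets("bucket-a", "bucket-b")     // buckets to analyze for that account
                .build();

        S3JobDefinition jobDefinition = S3JobDefinition.builder()
                .bucketDefinitions(perAccount)       // one entry per AWS account
                .build();
        System.out.println(jobDefinition);
    }
}
```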

" }, "scoping": { "shape": "Scoping", @@ -5587,7 +5847,7 @@ "serverSideEncryption": { "shape": "ServerSideEncryption", "locationName": "serverSideEncryption", - "documentation": "

The server-side encryption settings for the object.

" + "documentation": "

The type of server-side encryption that's used for the object.

" }, "size": { "shape": "__long", @@ -5637,7 +5897,7 @@ "documentation": "

The property- or tag-based conditions that determine which objects to include in the analysis.

" } }, - "documentation": "

Specifies one or more property- and tag-based conditions that refine the scope of a classification job. These conditions define criteria that determine which objects a job analyzes.

" + "documentation": "

Specifies one or more property- and tag-based conditions that refine the scope of a classification job. These conditions define criteria that determine which objects a job analyzes. Exclude conditions take precedence over include conditions.

" }, "SensitiveData": { "type": "list", @@ -5652,12 +5912,12 @@ "category": { "shape": "SensitiveDataItemCategory", "locationName": "category", - "documentation": "

The category of sensitive data that was detected. For example: FINANCIAL_INFORMATION, for financial information such as credit card numbers; PERSONAL_INFORMATION, for personally identifiable information such as full names and mailing addresses; or, CUSTOM_IDENTIFIER, for data that was detected by a custom data identifier.

" + "documentation": "

The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or AWS secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as driver's license identification numbers.

" }, "detections": { "shape": "DefaultDetections", "locationName": "detections", - "documentation": "

An array of objects, one for each type of sensitive data that was detected. Each object reports the number of occurrences of a specific type of sensitive data that was detected.

" + "documentation": "

An array of objects, one for each type of sensitive data that was detected. Each object reports the number of occurrences of a specific type of sensitive data that was detected, and the location of up to 15 of those occurrences.

" }, "totalCount": { "shape": "__long", @@ -5665,7 +5925,7 @@ "documentation": "

The total number of occurrences of the sensitive data that was detected.

" } }, - "documentation": "

Provides information about the category, type, and number of occurrences of sensitive data that produced a finding.

" + "documentation": "

Provides information about the category, types, and occurrences of sensitive data that produced a sensitive data finding.

" }, "SensitiveDataItemCategory": { "type": "string", @@ -5691,7 +5951,7 @@ "documentation": "

The unique identifier for the AWS Key Management Service (AWS KMS) master key that's used to encrypt the bucket or object. This value is null if AWS KMS isn't used to encrypt the bucket or object.

" } }, - "documentation": "

Provides information about the server-side encryption settings for an S3 bucket or object.

" + "documentation": "

Provides information about the server-side encryption settings for an S3 bucket or S3 object.

" }, "ServiceLimit": { "type": "structure", @@ -5798,19 +6058,19 @@ "description": { "shape": "SeverityDescription", "locationName": "description", - "documentation": "

The textual representation of the severity value, such as Low or High.

" + "documentation": "

The qualitative representation of the finding's severity, ranging from Low (least severe) to High (most severe).

" }, "score": { "shape": "__long", "locationName": "score", - "documentation": "

The numeric score for the severity value, ranging from 0 (least severe) to 4 (most severe).

" + "documentation": "

The numerical representation of the finding's severity, ranging from 1 (least severe) to 3 (most severe).

" } }, - "documentation": "

Provides the numeric score and textual representation of a severity value.

" + "documentation": "

Provides the numerical and qualitative representations of a finding's severity.

" }, "SeverityDescription": { "type": "string", - "documentation": "

The textual representation of the finding's severity. Possible values are:

", + "documentation": "

The qualitative representation of the finding's severity. Possible values are:

", "enum": [ "Low", "Medium", @@ -5832,17 +6092,17 @@ "comparator": { "shape": "JobComparator", "locationName": "comparator", - "documentation": "

The operator to use in the condition.

" + "documentation": "

The operator to use in the condition. Valid operators for each supported property (key) are:

  • OBJECT_EXTENSION - EQ (equals) or NE (not equals)

  • OBJECT_LAST_MODIFIED_DATE - Any operator except CONTAINS

  • OBJECT_SIZE - Any operator except CONTAINS

  • TAG - EQ (equals) or NE (not equals)

" }, "key": { "shape": "ScopeFilterKey", "locationName": "key", - "documentation": "

The property to use in the condition.

" + "documentation": "

The object property to use in the condition.

" }, "values": { "shape": "__listOf__string", "locationName": "values", - "documentation": "

An array that lists one or more values to use in the condition.

" + "documentation": "

An array that lists the values to use in the condition. If the value for the key property is OBJECT_EXTENSION, this array can specify multiple values and Amazon Macie uses an OR operator to join the values. Otherwise, this array can specify only one value. Valid values for each supported property (key) are:

  • OBJECT_EXTENSION - A string that represents the file name extension of an object. For example: doc, docx, pdf

  • OBJECT_LAST_MODIFIED_DATE - The date and time (in UTC and extended ISO 8601 format) when an object was created or last changed, whichever is latest. For example: 2020-09-28T14:31:13Z

  • OBJECT_SIZE - An integer that represents the storage size (in bytes) of an object.

  • TAG - A string that represents a tag key for an object. For advanced options, use a TagScopeTerm object, instead of a SimpleScopeTerm object, to define a tag-based condition for the job.

" } }, "documentation": "

Specifies a property-based condition that determines whether an object is included or excluded from a classification job.
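A hedged sketch of one such property-based condition using the generated Macie2 model classes (standard SDK v2 codegen naming assumed; the extensions are placeholders). Per the valid-values list above, OBJECT_EXTENSION accepts multiple values joined with OR:

```java
// Include or exclude objects by file name extension.
import software.amazon.awssdk.services.macie2.model.JobComparator;
import software.amazon.awssdk.services.macie2.model.ScopeFilterKey;
import software.amazon.awssdk.services.macie2.model.SimpleScopeTerm;

public class ScopeTermExample {
    public static void main(String[] args) {
        SimpleScopeTerm extensionCondition = SimpleScopeTerm.builder()
                .key(ScopeFilterKey.OBJECT_EXTENSION)  // object property to evaluate
                .comparator(JobComparator.EQ)          // EQ or NE for OBJECT_EXTENSION
                .values("pdf", "docx")                 // OR-joined file name extensions
                .build();
        System.out.println(extensionCondition);
    }
}
```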

" @@ -5861,7 +6121,7 @@ "documentation": "

The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" } }, - "documentation": "

Specifies criteria for sorting the results of a request for information about findings.

" + "documentation": "

Specifies criteria for sorting the results of a request for findings.

" }, "Statistics": { "type": "structure", @@ -5881,7 +6141,7 @@ }, "StorageClass": { "type": "string", - "documentation": "

The storage class of the S3 bucket or object. Possible values are:

", + "documentation": "

The storage class of the S3 object. Possible values are:

", "enum": [ "STANDARD", "REDUCED_REDUNDANCY", @@ -5932,7 +6192,7 @@ "comparator": { "shape": "JobComparator", "locationName": "comparator", - "documentation": "

The operator to use in the condition.

" + "documentation": "

The operator to use in the condition. Valid operators are EQ (equals) or NE (not equals).

" }, "key": { "shape": "__string", @@ -5942,7 +6202,7 @@ "tagValues": { "shape": "__listOfTagValuePair", "locationName": "tagValues", - "documentation": "

The tag key and value pairs to use in the condition.

" + "documentation": "

The tag keys or tag key and value pairs to use in the condition.

" }, "target": { "shape": "TagTarget", @@ -5970,10 +6230,10 @@ "value": { "shape": "__string", "locationName": "value", - "documentation": "

The tag value, associated with the specified tag key, to use in the condition.

" + "documentation": "

The tag value, associated with the specified tag key (key), to use in the condition. To specify only a tag key for a condition, specify the tag key for the key property and set this value to an empty string.

" } }, - "documentation": "

Specifies a tag key and value, as a pair, to use in a tag-based condition for a classification job.

" + "documentation": "

Specifies a tag key or tag key and value pair to use in a tag-based condition for a classification job.
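A hedged sketch of the key-only form described above: the tag key goes in the TagValuePair key property and the value is set to an empty string. Class and enum names assume the standard SDK v2 codegen naming; the tag key is a placeholder:

```java
// Tag-based condition that matches on a tag key regardless of its value.
import software.amazon.awssdk.services.macie2.model.JobComparator;
import software.amazon.awssdk.services.macie2.model.TagScopeTerm;
import software.amazon.awssdk.services.macie2.model.TagTarget;
import software.amazon.awssdk.services.macie2.model.TagValuePair;

public class TagConditionExample {
    public static void main(String[] args) {
        TagScopeTerm tagCondition = TagScopeTerm.builder()
                .comparator(JobComparator.EQ)          // EQ or NE only
                .key("TAG")                            // tag-based conditions use the TAG key
                .tagValues(TagValuePair.builder()
                        .key("project")                // the tag key to match
                        .value("")                     // empty string = match on the key only
                        .build())
                .target(TagTarget.S3_OBJECT)
                .build();
        System.out.println(tagCondition);
    }
}
```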

" }, "TestCustomDataIdentifierRequest": { "type": "structure", @@ -6098,7 +6358,7 @@ "jobStatus": { "shape": "JobStatus", "locationName": "jobStatus", - "documentation": "

The status to change the job's status to. The only supported value is CANCELLED, which cancels the job completely.

" + "documentation": "

The new status for the job. Valid values are:

  • CANCELLED - Stops the job permanently and cancels it. You can't resume a job after you cancel it. This value is valid only if the job's current status is IDLE, PAUSED, RUNNING, or USER_PAUSED.

  • RUNNING - Resumes the job. This value is valid only if the job's current status is USER_PAUSED. If you specify this value, Amazon Macie immediately resumes processing from the point where you paused the job. Otherwise, Macie resumes the job according to the schedule and other configuration settings for the job.

  • USER_PAUSED - Pauses the job. This value is valid only if the job's current status is IDLE or RUNNING. If you specify this value and the job's current status is RUNNING, Macie immediately begins to pause all processing tasks for the job.

    If you pause a job when its status is RUNNING and you don't resume the job within 30 days, the job expires and Macie cancels it. You can't resume a job after it's cancelled.
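A hedged sketch of pausing and later resuming a classification job with the generated Macie2 client (standard SDK v2 naming assumed; the job ID is a placeholder). Per the rules above, RUNNING is only valid when the job's current status is USER_PAUSED:

```java
// Pause a classification job, then resume it before the 30-day expiration.
import software.amazon.awssdk.services.macie2.Macie2Client;
import software.amazon.awssdk.services.macie2.model.JobStatus;
import software.amazon.awssdk.services.macie2.model.UpdateClassificationJobRequest;

public class PauseResumeJob {
    public static void main(String[] args) {
        try (Macie2Client macie = Macie2Client.create()) {
            // Pause a job that is currently IDLE or RUNNING.
            macie.updateClassificationJob(UpdateClassificationJobRequest.builder()
                    .jobId("example-job-id")
                    .jobStatus(JobStatus.USER_PAUSED)
                    .build());

            // Resume it later; if not resumed within 30 days, Macie cancels the job.
            macie.updateClassificationJob(UpdateClassificationJobRequest.builder()
                    .jobId("example-job-id")
                    .jobStatus(JobStatus.RUNNING)
                    .build());
        }
    }
}
```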

" } }, "required": [ @@ -6444,6 +6704,27 @@ "AWSService" ] }, + "UserPausedDetails": { + "type": "structure", + "members": { + "jobExpiresAt": { + "shape": "__timestampIso8601", + "locationName": "jobExpiresAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job will expire and be cancelled if you don't resume it first. If you don't resume a job within 30 days of pausing it, the job expires and Amazon Macie cancels it.

" + }, + "jobImminentExpirationHealthEventArn": { + "shape": "__string", + "locationName": "jobImminentExpirationHealthEventArn", + "documentation": "

The Amazon Resource Name (ARN) of the AWS Health event that Amazon Macie sent to notify you of the job's pending expiration and cancellation. This value is null if a job has been paused for less than 23 days.

" + }, + "jobPausedAt": { + "shape": "__timestampIso8601", + "locationName": "jobPausedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when you paused the job.

" + } + }, + "documentation": "

Provides information about when a classification job was paused and when it will expire and be cancelled if it isn't resumed. This object is present only if a job's current status (jobStatus) is USER_PAUSED. The information in this object applies only to a job that was paused while it had a status of RUNNING.

" + }, "ValidationException": { "type": "structure", "members": { diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index a7b7f13f8590..527593051bf4 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index b8bde93dd59b..673686e85f61 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index be769284a0ad..2eddca9c2376 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/service-2.json b/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/service-2.json index 141550a1b623..034ca272926f 100644 --- a/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/service-2.json +++ b/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/service-2.json @@ -101,7 +101,7 @@ "members":{ "dataSetType":{ "shape":"DataSetType", - "documentation":"

The desired data set type.

  • customer_subscriber_hourly_monthly_subscriptions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • customer_subscriber_annual_subscriptions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_usage_by_instance_type

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_fees

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_free_trial_conversions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_new_instances

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_new_product_subscribers

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_canceled_product_subscribers

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • monthly_revenue_billing_and_revenue_data

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior.

  • monthly_revenue_annual_subscriptions

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.

  • monthly_revenue_field_demonstration_usage

    From 2018-03-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • monthly_revenue_flexible_payment_schedule

    From 2018-11-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • disbursed_amount_by_product

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_instance_hours

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_customer_geo

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_uncollected_funds

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_disbursed_funds

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_past_due_funds

    From 2018-04-07 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_uncollected_funds_breakdown

    From 2019-10-04 to present: Available every 30 days by 24:00 UTC.

  • sales_compensation_billed_revenue

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.

  • us_sales_and_use_tax_records

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

" + "documentation":"

The desired data set type.

  • customer_subscriber_hourly_monthly_subscriptions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • customer_subscriber_annual_subscriptions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_usage_by_instance_type

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_fees

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_free_trial_conversions

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_new_instances

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_new_product_subscribers

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • daily_business_canceled_product_subscribers

    From 2017-09-15 to present: Available daily by 24:00 UTC.

  • monthly_revenue_billing_and_revenue_data

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior.

  • monthly_revenue_annual_subscriptions

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes up-front software charges (e.g. annual) from one month prior.

  • monthly_revenue_field_demonstration_usage

    From 2018-03-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • monthly_revenue_flexible_payment_schedule

    From 2018-11-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • disbursed_amount_by_product

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_instance_hours

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_customer_geo

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_uncollected_funds

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_disbursed_funds

    From 2017-09-15 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_age_of_past_due_funds

    From 2018-04-07 to present: Available every 30 days by 24:00 UTC.

  • disbursed_amount_by_uncollected_funds_breakdown

    From 2019-10-04 to present: Available every 30 days by 24:00 UTC.

  • sales_compensation_billed_revenue

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month prior, and up-front software charges (e.g. annual) from one month prior.

  • us_sales_and_use_tax_records

    From 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 UTC.

  • disbursed_amount_by_product_with_uncollected_funds

    This data set is deprecated. Download related reports from AMMP instead!

  • customer_profile_by_industry

    This data set is deprecated. Download related reports from AMMP instead!

  • customer_profile_by_revenue

    This data set is deprecated. Download related reports from AMMP instead!

  • customer_profile_by_geography

    This data set is deprecated. Download related reports from AMMP instead!
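A hedged sketch of requesting one of the data sets listed above with the generated Marketplace Commerce Analytics client, assuming standard SDK v2 codegen naming. The role ARN, bucket, prefix, and SNS topic are placeholders:

```java
// Ask the service to publish a data set to S3 and notify via SNS.
import java.time.Instant;
import software.amazon.awssdk.services.marketplacecommerceanalytics.MarketplaceCommerceAnalyticsClient;
import software.amazon.awssdk.services.marketplacecommerceanalytics.model.DataSetType;
import software.amazon.awssdk.services.marketplacecommerceanalytics.model.GenerateDataSetRequest;
import software.amazon.awssdk.services.marketplacecommerceanalytics.model.GenerateDataSetResponse;

public class GenerateDataSetExample {
    public static void main(String[] args) {
        try (MarketplaceCommerceAnalyticsClient client = MarketplaceCommerceAnalyticsClient.create()) {
            GenerateDataSetResponse response = client.generateDataSet(GenerateDataSetRequest.builder()
                    .dataSetType(DataSetType.DAILY_BUSINESS_FEES)
                    .dataSetPublicationDate(Instant.parse("2020-10-01T00:00:00Z"))
                    .roleNameArn("arn:aws:iam::111122223333:role/MarketplaceAnalyticsRole")
                    .destinationS3BucketName("example-analytics-bucket")
                    .destinationS3Prefix("reports/")
                    .snsTopicArn("arn:aws:sns:us-east-1:111122223333:analytics-notifications")
                    .build());
            System.out.println("Request ID: " + response.dataSetRequestId());
        }
    }
}
```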

" }, "dataSetPublicationDate":{ "shape":"DataSetPublicationDate", diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 4dbc8058dd26..f971bb7a9027 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index 0c8aa0fcecc6..6249b7c9e198 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json b/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json index abe33baead0f..a414be5600d9 100644 --- a/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json +++ b/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json @@ -25,12 +25,14 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidProductCodeException"}, {"shape":"InvalidUsageDimensionException"}, + {"shape":"InvalidTagException"}, + {"shape":"InvalidUsageAllocationsException"}, {"shape":"InvalidCustomerIdentifierException"}, {"shape":"TimestampOutOfBoundsException"}, {"shape":"ThrottlingException"}, {"shape":"DisabledApiException"} ], - "documentation":"

BatchMeterUsage is called from a SaaS application listed on the AWS Marketplace to post metering records for a set of customers.

For identical requests, the API is idempotent; requests can be retried with the same records or a subset of the input records.

Every request to BatchMeterUsage is for one product. If you need to meter usage for multiple products, you must make multiple calls to BatchMeterUsage.

BatchMeterUsage can process up to 25 UsageRecords at a time.

" + "documentation":"

BatchMeterUsage is called from a SaaS application listed on the AWS Marketplace to post metering records for a set of customers.

For identical requests, the API is idempotent; requests can be retried with the same records or a subset of the input records.

Every request to BatchMeterUsage is for one product. If you need to meter usage for multiple products, you must make multiple calls to BatchMeterUsage.

BatchMeterUsage can process up to 25 UsageRecords at a time.

A UsageRecord can optionally include multiple usage allocations, to provide customers with usage data split into buckets by tags that you define (or allow the customer to define).

BatchMeterUsage requests must be less than 1MB in size.

" }, "MeterUsage":{ "name":"MeterUsage", @@ -44,13 +46,15 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidProductCodeException"}, {"shape":"InvalidUsageDimensionException"}, + {"shape":"InvalidTagException"}, + {"shape":"InvalidUsageAllocationsException"}, {"shape":"InvalidEndpointRegionException"}, {"shape":"TimestampOutOfBoundsException"}, {"shape":"DuplicateRequestException"}, {"shape":"ThrottlingException"}, {"shape":"CustomerNotEntitledException"} ], - "documentation":"

API to emit metering records. For identical requests, the API is idempotent. It simply returns the metering record ID.

MeterUsage is authenticated on the buyer's AWS account using credentials from the EC2 instance, ECS task, or EKS pod.

" + "documentation":"

API to emit metering records. For identical requests, the API is idempotent. It simply returns the metering record ID.

MeterUsage is authenticated on the buyer's AWS account using credentials from the EC2 instance, ECS task, or EKS pod.

MeterUsage can optionally include multiple usage allocations, to provide customers with usage data split into buckets by tags that you define (or allow the customer to define).

" }, "RegisterUsage":{ "name":"RegisterUsage", @@ -91,6 +95,11 @@ } }, "shapes":{ + "AllocatedUsageQuantity":{ + "type":"integer", + "max":2147483647, + "min":0 + }, "BatchMeterUsageRequest":{ "type":"structure", "required":[ @@ -127,7 +136,8 @@ "CustomerIdentifier":{ "type":"string", "max":255, - "min":1 + "min":1, + "pattern":"[\\s\\S]+" }, "CustomerNotEntitledException":{ "type":"structure", @@ -210,6 +220,14 @@ "documentation":"

RegisterUsage must be called in the same AWS Region the ECS task was launched in. This prevents a container from hardcoding a Region (e.g. withRegion("us-east-1")) when calling RegisterUsage.

", "exception":true }, + "InvalidTagException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

The tag is invalid, or the number of tags is greater than 5.

", + "exception":true + }, "InvalidTokenException":{ "type":"structure", "members":{ @@ -218,6 +236,14 @@ "documentation":"

Registration token is invalid.

", "exception":true }, + "InvalidUsageAllocationsException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"

The usage allocation objects are invalid, or the number of allocations is greater than 500 for a single usage record.

", + "exception":true + }, "InvalidUsageDimensionException":{ "type":"structure", "members":{ @@ -253,6 +279,10 @@ "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the permissions required for the action, but does not make the request. If you have the permissions, the request returns DryRunOperation; otherwise, it returns UnauthorizedException. Defaults to false if not specified.

" + }, + "UsageAllocations":{ + "shape":"UsageAllocations", + "documentation":"

The set of UsageAllocations to submit.

The sum of all UsageAllocation quantities must equal the UsageQuantity of the MeterUsage request, and each UsageAllocation must have a unique set of tags (including no tags).
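A hedged sketch of that allocation rule: the allocated quantities sum to the metered UsageQuantity, and each allocation carries a distinct tag set (or none). Standard SDK v2 codegen naming is assumed for the generated Marketplace Metering classes; the product code and dimension are placeholders:

```java
// Meter 10 units, split 7/3 between a tagged bucket and an untagged bucket.
import java.time.Instant;
import software.amazon.awssdk.services.marketplacemetering.MarketplaceMeteringClient;
import software.amazon.awssdk.services.marketplacemetering.model.MeterUsageRequest;
import software.amazon.awssdk.services.marketplacemetering.model.Tag;
import software.amazon.awssdk.services.marketplacemetering.model.UsageAllocation;

public class MeterUsageWithAllocations {
    public static void main(String[] args) {
        UsageAllocation teamAlpha = UsageAllocation.builder()
                .allocatedUsageQuantity(7)
                .tags(Tag.builder().key("team").value("alpha").build())
                .build();
        UsageAllocation untagged = UsageAllocation.builder()
                .allocatedUsageQuantity(3)           // bucket for usage with no tags
                .build();

        try (MarketplaceMeteringClient metering = MarketplaceMeteringClient.create()) {
            metering.meterUsage(MeterUsageRequest.builder()
                    .productCode("example-product-code")
                    .usageDimension("example-dimension")
                    .usageQuantity(10)               // must equal 7 + 3
                    .timestamp(Instant.now())
                    .usageAllocations(teamAlpha, untagged)
                    .build());
        }
    }
}
```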

" } } }, @@ -267,11 +297,12 @@ }, "NonEmptyString":{ "type":"string", - "pattern":"\\S+" + "pattern":"[\\s\\S]+" }, "Nonce":{ "type":"string", - "max":255 + "max":255, + "pattern":"[\\s\\S]*" }, "PlatformNotSupportedException":{ "type":"structure", @@ -284,7 +315,8 @@ "ProductCode":{ "type":"string", "max":255, - "min":1 + "min":1, + "pattern":"[\\s\\S]+" }, "RegisterUsageRequest":{ "type":"structure", @@ -346,6 +378,42 @@ "documentation":"

The result of the ResolveCustomer operation. Contains the CustomerIdentifier and product code.

" }, "String":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

One part of a key-value pair that makes up a tag. A key is a label that acts like a category for the specific tag values.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

One part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key). The value can be empty or null.

" + } + }, + "documentation":"

Metadata assigned to an allocation. Each tag is made up of a key and a value.

" + }, + "TagKey":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9+ -=._:\\/@]+$" + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":5, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9+ -=._:\\/@]+$" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -363,10 +431,32 @@ "documentation":"

The timestamp value passed in the meterUsage() is out of allowed range.

", "exception":true }, + "UsageAllocation":{ + "type":"structure", + "required":["AllocatedUsageQuantity"], + "members":{ + "AllocatedUsageQuantity":{ + "shape":"AllocatedUsageQuantity", + "documentation":"

The total quantity allocated to this bucket of usage.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The set of tags that define the bucket of usage. For the bucket of items with no tags, this parameter can be left out.

" + } + }, + "documentation":"

Usage allocations allow you to split usage into buckets by tags.

Each UsageAllocation indicates the usage quantity for a specific set of tags.

" + }, + "UsageAllocations":{ + "type":"list", + "member":{"shape":"UsageAllocation"}, + "max":500, + "min":1 + }, "UsageDimension":{ "type":"string", "max":255, - "min":1 + "min":1, + "pattern":"[\\s\\S]+" }, "UsageQuantity":{ "type":"integer", @@ -396,6 +486,10 @@ "Quantity":{ "shape":"UsageQuantity", "documentation":"

The quantity of usage consumed by the customer for the given dimension and time. Defaults to 0 if not specified.

" + }, + "UsageAllocations":{ + "shape":"UsageAllocations", + "documentation":"

The set of UsageAllocations to submit. The sum of all UsageAllocation quantities must equal the Quantity of the UsageRecord.

" } }, "documentation":"

A UsageRecord indicates a quantity of usage for a given product, customer, dimension and time.

Multiple requests with the same UsageRecords as input will be deduplicated to prevent double charges.
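A hedged sketch of posting usage records for a SaaS customer with BatchMeterUsage, assuming standard SDK v2 codegen naming. The product code, customer identifier, and dimension are placeholders; a single request can carry up to 25 UsageRecords and must stay under 1 MB:

```java
// Submit one UsageRecord and retry any records the service could not process.
import java.time.Instant;
import software.amazon.awssdk.services.marketplacemetering.MarketplaceMeteringClient;
import software.amazon.awssdk.services.marketplacemetering.model.BatchMeterUsageRequest;
import software.amazon.awssdk.services.marketplacemetering.model.BatchMeterUsageResponse;
import software.amazon.awssdk.services.marketplacemetering.model.UsageRecord;

public class BatchMeterUsageExample {
    public static void main(String[] args) {
        UsageRecord record = UsageRecord.builder()
                .customerIdentifier("example-customer-id")
                .dimension("example-dimension")
                .quantity(42)
                .timestamp(Instant.now())
                .build();

        try (MarketplaceMeteringClient metering = MarketplaceMeteringClient.create()) {
            BatchMeterUsageResponse response = metering.batchMeterUsage(BatchMeterUsageRequest.builder()
                    .productCode("example-product-code")
                    .usageRecords(record)
                    .build());
            // Records that could not be processed are returned for retry.
            response.unprocessedRecords().forEach(r -> System.out.println("Retry: " + r));
        }
    }
}
```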

" diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index 845a6b43e1c4..1d0b806ed618 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 3dfd499cb392..3f86a5581c5b 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index d1baa265ebfe..6b2e185fd53c 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -1721,6 +1721,38 @@ "USE_CONFIGURED" ] }, + "AutomatedAbrSettings": { + "type": "structure", + "members": { + "MaxAbrBitrate": { + "shape": "__integerMin100000Max100000000", + "locationName": "maxAbrBitrate", + "documentation": "Optional. The maximum target bit rate used in your automated ABR stack. Use this value to set an upper limit on the bandwidth consumed by the highest-quality rendition. This is the rendition that is delivered to viewers with the fastest internet connections. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default." + }, + "MaxRenditions": { + "shape": "__integerMin3Max15", + "locationName": "maxRenditions", + "documentation": "Optional. The maximum number of renditions that MediaConvert will create in your automated ABR stack. The number of renditions is determined automatically, based on analysis of each job, but will never exceed this limit. When you set this to Auto in the console, which is equivalent to excluding it from your JSON job specification, MediaConvert defaults to a limit of 15." + }, + "MinAbrBitrate": { + "shape": "__integerMin100000Max100000000", + "locationName": "minAbrBitrate", + "documentation": "Optional. The minimum target bitrate used in your automated ABR stack. Use this value to set a lower limit on the bitrate of video delivered to viewers with slow internet connections. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default." + } + }, + "documentation": "Use automated ABR to have MediaConvert set up the renditions in your ABR package for you automatically, based on characteristics of your input video. This feature optimizes video quality while minimizing the overall size of your ABR package." + }, + "AutomatedEncodingSettings": { + "type": "structure", + "members": { + "AbrSettings": { + "shape": "AutomatedAbrSettings", + "locationName": "abrSettings", + "documentation": "Use automated ABR to have MediaConvert set up the renditions in your ABR package for you automatically, based on characteristics of your input video. This feature optimizes video quality while minimizing the overall size of your ABR package." + } + }, + "documentation": "Use automated encoding to have MediaConvert choose your encoding settings for you, based on characteristics of your input video." + }, "Av1AdaptiveQuantization": { "type": "string", "documentation": "Specify the strength of any adaptive quantization filters that you enable. 
The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization).", @@ -2395,7 +2427,7 @@ }, "CmafClientCache": { "type": "string", - "documentation": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", "enum": [ "DISABLED", "ENABLED" @@ -2469,7 +2501,7 @@ "ClientCache": { "shape": "CmafClientCache", "locationName": "clientCache", - "documentation": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay." + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." }, "CodecSpecification": { "shape": "CmafCodecSpecification", @@ -2634,6 +2666,14 @@ "DISABLED" ] }, + "CmfcAudioDuration": { + "type": "string", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "CmfcScte35Esam": { "type": "string", "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", @@ -2653,6 +2693,11 @@ "CmfcSettings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "CmfcAudioDuration", + "locationName": "audioDuration", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + }, "Scte35Esam": { "shape": "CmfcScte35Esam", "locationName": "scte35Esam", @@ -2891,12 +2936,12 @@ "Tags": { "shape": "__mapOf__string", "locationName": "tags", - "documentation": "Optional. The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key." + "documentation": "Optional. The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key. Use standard AWS tags on your job for automatic integration with AWS services and for custom integrations and workflows." }, "UserMetadata": { "shape": "__mapOf__string", "locationName": "userMetadata", - "documentation": "Optional. User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs." + "documentation": "Optional. User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs. Use only for existing integrations or workflows that rely on job metadata tags. Otherwise, we recommend that you use standard AWS tags." } }, "required": [ @@ -3150,6 +3195,11 @@ "locationName": "minBufferTime", "documentation": "Minimum time of initially buffered media that is needed to ensure smooth playout." }, + "MinFinalSegmentLength": { + "shape": "__doubleMin0Max2147483647", + "locationName": "minFinalSegmentLength", + "documentation": "Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds." + }, "MpdProfile": { "shape": "DashIsoMpdProfile", "locationName": "mpdProfile", @@ -4426,9 +4476,10 @@ }, "H264AdaptiveQuantization": { "type": "string", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).", + "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. 
If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.", "enum": [ "OFF", + "AUTO", "LOW", "MEDIUM", "HIGH", @@ -4497,7 +4548,7 @@ }, "H264FlickerAdaptiveQuantization": { "type": "string", - "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF).", + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -4617,7 +4668,7 @@ "AdaptiveQuantization": { "shape": "H264AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization)." + "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization." }, "Bitrate": { "shape": "__integerMin1000Max1152000000", @@ -4652,7 +4703,7 @@ "FlickerAdaptiveQuantization": { "shape": "H264FlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization", - "documentation": "Enable this setting to have the encoder reduce I-frame pop. 
I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF)." + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." }, "FramerateControl": { "shape": "H264FramerateControl", @@ -4787,7 +4838,7 @@ "SpatialAdaptiveQuantization": { "shape": "H264SpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "documentation": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." }, "Syntax": { "shape": "H264Syntax", @@ -4802,7 +4853,7 @@ "TemporalAdaptiveQuantization": { "shape": "H264TemporalAdaptiveQuantization", "locationName": "temporalAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. 
If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." }, "UnregisteredSeiTimecode": { "shape": "H264UnregisteredSeiTimecode", @@ -4822,7 +4873,7 @@ }, "H264SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "documentation": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. 
To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -4847,7 +4898,7 @@ }, "H264TemporalAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -5458,7 +5509,7 @@ }, "HlsClientCache": { "type": "string", - "documentation": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. 
For example, use the Cache-Control http header.", "enum": [ "DISABLED", "ENABLED" @@ -5565,7 +5616,7 @@ "ClientCache": { "shape": "HlsClientCache", "locationName": "clientCache", - "documentation": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay." + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." }, "CodecSpecification": { "shape": "HlsCodecSpecification", @@ -5896,7 +5947,7 @@ "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." }, "FileInput": { - "shape": "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL", + "shape": "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA", "locationName": "fileInput", "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." }, @@ -7022,6 +7073,14 @@ "ATSC" ] }, + "M2tsAudioDuration": { + "type": "string", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. 
When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "M2tsBufferModel": { "type": "string", "documentation": "Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, use multiplex buffer model. If set to NONE, this can lead to lower latency, but low-memory devices may not be able to play back the stream without interruptions.", @@ -7133,6 +7192,11 @@ "locationName": "audioBufferModel", "documentation": "Selects between the DVB and ATSC buffer models for Dolby Digital audio." }, + "AudioDuration": { + "shape": "M2tsAudioDuration", + "locationName": "audioDuration", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", "locationName": "audioFramesPerPes", @@ -7311,6 +7375,14 @@ }, "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." }, + "M3u8AudioDuration": { + "type": "string", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. 
When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "M3u8NielsenId3": { "type": "string", "documentation": "If INSERT, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", @@ -7338,6 +7410,11 @@ "M3u8Settings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "M3u8AudioDuration", + "locationName": "audioDuration", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", "locationName": "audioFramesPerPes", @@ -7663,6 +7740,11 @@ "Mp4Settings": { "type": "structure", "members": { + "AudioDuration": { + "shape": "CmfcAudioDuration", + "locationName": "audioDuration", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + }, "CslgAtom": { "shape": "Mp4CslgAtom", "locationName": "cslgAtom", @@ -7691,6 +7773,22 @@ }, "documentation": "Settings for MP4 container. You can create audio-only AAC outputs with this container." }, + "MpdAccessibilityCaptionHints": { + "type": "string", + "documentation": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. 
When you enable this setting, this is the markup that MediaConvert includes in your manifest: ", + "enum": [ + "INCLUDE", + "EXCLUDE" + ] + }, + "MpdAudioDuration": { + "type": "string", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "enum": [ + "DEFAULT_CODEC_DURATION", + "MATCH_VIDEO_DURATION" + ] + }, "MpdCaptionContainerType": { "type": "string", "documentation": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files.", @@ -7718,6 +7816,16 @@ "MpdSettings": { "type": "structure", "members": { + "AccessibilityCaptionHints": { + "shape": "MpdAccessibilityCaptionHints", + "locationName": "accessibilityCaptionHints", + "documentation": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: " + }, + "AudioDuration": { + "shape": "MpdAudioDuration", + "locationName": "audioDuration", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." 
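The AudioDuration members added above for the M2TS, M3U8, MP4, and MPD containers all share the same DEFAULT_CODEC_DURATION / MATCH_VIDEO_DURATION pair. A minimal sketch of how the generated AWS SDK for Java v2 builders would surface them, assuming the usual codegen naming for these model members (the surrounding job wiring is illustrative, not taken from this diff):

import software.amazon.awssdk.services.mediaconvert.model.M2tsAudioDuration;
import software.amazon.awssdk.services.mediaconvert.model.M2tsSettings;
import software.amazon.awssdk.services.mediaconvert.model.MpdAccessibilityCaptionHints;
import software.amazon.awssdk.services.mediaconvert.model.MpdAudioDuration;
import software.amazon.awssdk.services.mediaconvert.model.MpdSettings;

public class AudioDurationExample {
    public static void main(String[] args) {
        // MPEG-2 TS output that pads or trims audio so a downstream repackager
        // sees audio and video durations that match within one frame.
        M2tsSettings m2ts = M2tsSettings.builder()
                .audioDuration(M2tsAudioDuration.MATCH_VIDEO_DURATION)
                .build();

        // DASH (MPD) output: same duration behavior, plus the optional
        // accessibility markup hints for embedded 608 captions.
        MpdSettings mpd = MpdSettings.builder()
                .audioDuration(MpdAudioDuration.MATCH_VIDEO_DURATION)
                .accessibilityCaptionHints(MpdAccessibilityCaptionHints.INCLUDE)
                .build();

        System.out.println(m2ts.audioDuration() + " / " + mpd.audioDuration());
    }
}

Per the documentation strings above, MATCH_VIDEO_DURATION is only worth setting when a downstream repackaging workflow is sensitive to very small audio/video duration differences; otherwise the default codec duration is the right choice.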
+ }, "CaptionContainerType": { "shape": "MpdCaptionContainerType", "locationName": "captionContainerType", @@ -8525,6 +8633,11 @@ "OutputGroup": { "type": "structure", "members": { + "AutomatedEncodingSettings": { + "shape": "AutomatedEncodingSettings", + "locationName": "automatedEncodingSettings", + "documentation": "Use automated encoding to have MediaConvert choose your encoding settings for you, based on characteristics of your input video." + }, "CustomName": { "shape": "__string", "locationName": "customName", @@ -10521,6 +10634,11 @@ "min": 0, "max": 99 }, + "__integerMin100000Max100000000": { + "type": "integer", + "min": 100000, + "max": 100000000 + }, "__integerMin1000Max1152000000": { "type": "integer", "min": 1000, @@ -10706,6 +10824,11 @@ "min": 384000, "max": 768000 }, + "__integerMin3Max15": { + "type": "integer", + "min": 3, + "max": 15 + }, "__integerMin48000Max48000": { "type": "integer", "min": 48000, @@ -11201,9 +11324,9 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, - "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { + "__stringPatternS3MM2PPMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLOOGGGGaA": { "type": "string", - "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + "pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, "__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { "type": "string", diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index 2843279bafcf..1bfb0b99d091 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/paginators-1.json b/services/medialive/src/main/resources/codegen-resources/paginators-1.json index dafc70dc02c0..a01ca8f87c71 100644 --- a/services/medialive/src/main/resources/codegen-resources/paginators-1.json +++ b/services/medialive/src/main/resources/codegen-resources/paginators-1.json @@ -53,6 +53,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "InputDevices" + }, + "ListInputDeviceTransfers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InputDeviceTransfers" } } } diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json index cc6130b4ad78..ed2c34200302 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,60 @@ "jsonVersion": "1.1" }, "operations": { + "AcceptInputDeviceTransfer": { + "name": "AcceptInputDeviceTransfer", + "http": { + "method": "POST", + "requestUri": "/prod/inputDevices/{inputDeviceId}/accept", + "responseCode": 200 + }, + "input": { + "shape": "AcceptInputDeviceTransferRequest" + }, + 
"output": { + "shape": "AcceptInputDeviceTransferResponse", + "documentation": "Your request to accept this device transfer has succeeded." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "UnprocessableEntityException", + "documentation": "Transfer operation failed, input could not be validated." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to accept input device transfers." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "NotFoundException", + "documentation": "Input device not found." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on transfer device calls to the input device service." + }, + { + "shape": "ConflictException", + "documentation": "Input device transfer could not be accepted." + } + ], + "documentation": "Accept an incoming input device transfer. The ownership of the device will transfer to your AWS account." + }, "BatchDelete": { "name": "BatchDelete", "http": { @@ -212,6 +266,60 @@ ], "documentation": "Update a channel schedule" }, + "CancelInputDeviceTransfer": { + "name": "CancelInputDeviceTransfer", + "http": { + "method": "POST", + "requestUri": "/prod/inputDevices/{inputDeviceId}/cancel", + "responseCode": 200 + }, + "input": { + "shape": "CancelInputDeviceTransferRequest" + }, + "output": { + "shape": "CancelInputDeviceTransferResponse", + "documentation": "Your cancel request has succeeded." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "UnprocessableEntityException", + "documentation": "Transfer operation failed, input could not be validated." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to cancel input device transfers." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "NotFoundException", + "documentation": "Input device not found." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on transfer device calls to the input device service." + }, + { + "shape": "ConflictException", + "documentation": "Input device transfer could not be canceled." + } + ], + "documentation": "Cancel an input device transfer that you have requested." + }, "CreateChannel": { "name": "CreateChannel", "http": { @@ -1350,6 +1458,52 @@ ], "documentation": "Produces list of channels that have been created" }, + "ListInputDeviceTransfers": { + "name": "ListInputDeviceTransfers", + "http": { + "method": "GET", + "requestUri": "/prod/inputDeviceTransfers", + "responseCode": 200 + }, + "input": { + "shape": "ListInputDeviceTransfersRequest" + }, + "output": { + "shape": "ListInputDeviceTransfersResponse", + "documentation": "An array of input devices that are currently being transferred." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." 
+ }, + { + "shape": "UnprocessableEntityException", + "documentation": "Transfer operation failed, input could not be validated." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to list transferring devices." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on list devices calls to the input device service." + } + ], + "documentation": "List input devices that are currently being transferred. List input devices that you are transferring from your AWS account or input devices that another AWS account is transferring to you." + }, "ListInputDevices": { "name": "ListInputDevices", "http": { @@ -1732,6 +1886,60 @@ ], "documentation": "Purchase an offering and create a reservation." }, + "RejectInputDeviceTransfer": { + "name": "RejectInputDeviceTransfer", + "http": { + "method": "POST", + "requestUri": "/prod/inputDevices/{inputDeviceId}/reject", + "responseCode": 200 + }, + "input": { + "shape": "RejectInputDeviceTransferRequest" + }, + "output": { + "shape": "RejectInputDeviceTransferResponse", + "documentation": "Your reject request has succeeded." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "UnprocessableEntityException", + "documentation": "Transfer operation failed, input could not be validated." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to reject input device transfers." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "NotFoundException", + "documentation": "Input device not found." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on transfer device calls to the input device service." + }, + { + "shape": "ConflictException", + "documentation": "Input device transfer could not be rejected." + } + ], + "documentation": "Reject the transfer of the specified input device to your AWS account." + }, "StartChannel": { "name": "StartChannel", "http": { @@ -1932,6 +2140,60 @@ ], "documentation": "Stops a running multiplex. If the multiplex isn't running, this action has no effect." }, + "TransferInputDevice": { + "name": "TransferInputDevice", + "http": { + "method": "POST", + "requestUri": "/prod/inputDevices/{inputDeviceId}/transfer", + "responseCode": 200 + }, + "input": { + "shape": "TransferInputDeviceRequest" + }, + "output": { + "shape": "TransferInputDeviceResponse", + "documentation": "The device transfer request has started." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "UnprocessableEntityException", + "documentation": "Transfer operation failed, input could not be validated." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to transfer input devices." 
+ }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "NotFoundException", + "documentation": "Input device not found." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout Error" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on transfer device calls to the input device service." + }, + { + "shape": "ConflictException", + "documentation": "Input device could not be transferred." + } + ], + "documentation": "Start an input device transfer to another AWS account. After you make the request, the other account must accept or reject the transfer." + }, "UpdateChannel": { "name": "UpdateChannel", "http": { @@ -2528,6 +2790,27 @@ }, "documentation": "Ac3 Settings" }, + "AcceptInputDeviceTransferRequest": { + "type": "structure", + "members": { + "InputDeviceId": { + "shape": "__string", + "location": "uri", + "locationName": "inputDeviceId", + "documentation": "The unique ID of the input device to accept. For example, hd-123456789abcdef." + } + }, + "required": [ + "InputDeviceId" + ], + "documentation": "Placeholder documentation for AcceptInputDeviceTransferRequest" + }, + "AcceptInputDeviceTransferResponse": { + "type": "structure", + "members": { + }, + "documentation": "Placeholder documentation for AcceptInputDeviceTransferResponse" + }, "AccessDenied": { "type": "structure", "members": { @@ -2960,6 +3243,16 @@ "AutomaticInputFailoverSettings": { "type": "structure", "members": { + "ErrorClearTimeMsec": { + "shape": "__integerMin1", + "locationName": "errorClearTimeMsec", + "documentation": "This clear time defines the requirement a recovered input must meet to be considered healthy. The input must have no failover conditions for this length of time. Enter a time in milliseconds. This value is particularly important if the input_preference for the failover pair is set to PRIMARY_INPUT_PREFERRED, because after this time, MediaLive will switch back to the primary input." + }, + "FailoverConditions": { + "shape": "__listOfFailoverCondition", + "locationName": "failoverConditions", + "documentation": "A list of failover conditions. If any of these conditions occur, MediaLive will perform a failover to the other input." + }, "InputPreference": { "shape": "InputPreference", "locationName": "inputPreference", @@ -3622,6 +3915,27 @@ "SCALED" ] }, + "CancelInputDeviceTransferRequest": { + "type": "structure", + "members": { + "InputDeviceId": { + "shape": "__string", + "location": "uri", + "locationName": "inputDeviceId", + "documentation": "The unique ID of the input device to cancel. For example, hd-123456789abcdef." + } + }, + "required": [ + "InputDeviceId" + ], + "documentation": "Placeholder documentation for CancelInputDeviceTransferRequest" + }, + "CancelInputDeviceTransferResponse": { + "type": "structure", + "members": { + }, + "documentation": "Placeholder documentation for CancelInputDeviceTransferResponse" + }, "CaptionDescription": { "type": "structure", "members": { @@ -5127,6 +5441,11 @@ "locationName": "deviceSettingsSyncState", "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." 
}, + "DeviceUpdateStatus": { + "shape": "DeviceUpdateStatus", + "locationName": "deviceUpdateStatus", + "documentation": "The status of software on the input device." + }, "HdDeviceSettings": { "shape": "InputDeviceHdSettings", "locationName": "hdDeviceSettings", @@ -5728,6 +6047,14 @@ "SYNCING" ] }, + "DeviceUpdateStatus": { + "type": "string", + "documentation": "The status of software on the input device.", + "enum": [ + "UP_TO_DATE", + "NOT_UP_TO_DATE" + ] + }, "DvbNitSettings": { "type": "structure", "members": { @@ -6356,6 +6683,28 @@ "TimecodeConfig" ] }, + "FailoverCondition": { + "type": "structure", + "members": { + "FailoverConditionSettings": { + "shape": "FailoverConditionSettings", + "locationName": "failoverConditionSettings", + "documentation": "Failover condition type-specific settings." + } + }, + "documentation": "Failover Condition settings. There can be multiple failover conditions inside AutomaticInputFailoverSettings." + }, + "FailoverConditionSettings": { + "type": "structure", + "members": { + "InputLossSettings": { + "shape": "InputLossFailoverSettings", + "locationName": "inputLossSettings", + "documentation": "MediaLive will perform a failover if content is not detected in this input for the specified period." + } + }, + "documentation": "Settings for one failover condition." + }, "FeatureActivations": { "type": "structure", "members": { @@ -7545,6 +7894,14 @@ "SUBDIRECTORY_PER_STREAM" ] }, + "HlsDiscontinuityTags": { + "type": "string", + "documentation": "Hls Discontinuity Tags", + "enum": [ + "INSERT", + "NEVER_INSERT" + ] + }, "HlsEncryptionType": { "type": "string", "documentation": "Hls Encryption Type", @@ -7616,6 +7973,11 @@ "locationName": "directoryStructure", "documentation": "Place segments in subdirectories." }, + "DiscontinuityTags": { + "shape": "HlsDiscontinuityTags", + "locationName": "discontinuityTags", + "documentation": "Specifies whether to insert EXT-X-DISCONTINUITY tags in the HLS child manifests for this output group.\nTypically, choose Insert because these tags are required in the manifest (according to the HLS specification) and serve an important purpose.\nChoose Never Insert only if the downstream system is doing real-time failover (without using the MediaLive automatic failover feature) and only if that downstream system has advised you to exclude the tags." + }, "EncryptionType": { "shape": "HlsEncryptionType", "locationName": "encryptionType", @@ -7636,6 +7998,11 @@ "locationName": "iFrameOnlyPlaylists", "documentation": "DISABLED: Do not create an I-frame-only manifest, but do create the master and media manifests (according to the Output Selection field).\n\nSTANDARD: Create an I-frame-only manifest for each output that contains video, as well as the other manifests (according to the Output Selection field). The I-frame manifest contains a #EXT-X-I-FRAMES-ONLY tag to indicate it is I-frame only, and one or more #EXT-X-BYTERANGE entries identifying the I-frame position. 
For example, #EXT-X-BYTERANGE:160364@1461888\"" }, + "IncompleteSegmentBehavior": { + "shape": "HlsIncompleteSegmentBehavior", + "locationName": "incompleteSegmentBehavior", + "documentation": "Specifies whether to include the final (incomplete) segment in the media output when the pipeline stops producing output because of a channel stop, a channel pause or a loss of input to the pipeline.\nAuto means that MediaLive decides whether to include the final segment, depending on the channel class and the types of output groups.\nSuppress means to never include the incomplete segment. We recommend you choose Auto and let MediaLive control the behavior." + }, "IndexNSegments": { "shape": "__integerMin3", "locationName": "indexNSegments", @@ -7792,6 +8159,14 @@ "ENABLED" ] }, + "HlsIncompleteSegmentBehavior": { + "type": "string", + "documentation": "Hls Incomplete Segment Behavior", + "enum": [ + "AUTO", + "SUPPRESS" + ] + }, "HlsInputSettings": { "type": "structure", "members": { @@ -8320,6 +8695,11 @@ "locationName": "deviceSettingsSyncState", "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." }, + "DeviceUpdateStatus": { + "shape": "DeviceUpdateStatus", + "locationName": "deviceUpdateStatus", + "documentation": "The status of software on the input device." + }, "HdDeviceSettings": { "shape": "InputDeviceHdSettings", "locationName": "hdDeviceSettings", @@ -8556,6 +8936,11 @@ "locationName": "deviceSettingsSyncState", "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." }, + "DeviceUpdateStatus": { + "shape": "DeviceUpdateStatus", + "locationName": "deviceUpdateStatus", + "documentation": "The status of software on the input device." + }, "HdDeviceSettings": { "shape": "InputDeviceHdSettings", "locationName": "hdDeviceSettings", @@ -8594,6 +8979,14 @@ }, "documentation": "Details of the input device." }, + "InputDeviceTransferType": { + "type": "string", + "documentation": "The type of device transfer. INCOMING for an input device that is being transferred to you, OUTGOING for an input device that you are transferring to another AWS account.", + "enum": [ + "OUTGOING", + "INCOMING" + ] + }, "InputDeviceType": { "type": "string", "documentation": "The type of the input device. For an AWS Elemental Link device that outputs resolutions up to 1080, choose \"HD\".", @@ -8698,6 +9091,17 @@ }, "documentation": "Input Loss Behavior" }, + "InputLossFailoverSettings": { + "type": "structure", + "members": { + "InputLossThresholdMsec": { + "shape": "__integerMin100", + "locationName": "inputLossThresholdMsec", + "documentation": "The amount of time (in milliseconds) that no input is detected. After that time, an input failover will occur." + } + }, + "documentation": "MediaLive will perform a failover if content is not detected in this input for the specified period." 
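The FailoverCondition, FailoverConditionSettings, and InputLossFailoverSettings shapes added above slot into AutomaticInputFailoverSettings next to the existing InputPreference member. A hedged sketch of assembling them with the generated v2 model builders; the millisecond values are placeholders:

import software.amazon.awssdk.services.medialive.model.AutomaticInputFailoverSettings;
import software.amazon.awssdk.services.medialive.model.FailoverCondition;
import software.amazon.awssdk.services.medialive.model.FailoverConditionSettings;
import software.amazon.awssdk.services.medialive.model.InputLossFailoverSettings;
import software.amazon.awssdk.services.medialive.model.InputPreference;

public class FailoverConditionExample {
    public static void main(String[] args) {
        // Fail over if no content is detected on this input for 500 ms.
        FailoverCondition inputLoss = FailoverCondition.builder()
                .failoverConditionSettings(FailoverConditionSettings.builder()
                        .inputLossSettings(InputLossFailoverSettings.builder()
                                .inputLossThresholdMsec(500)
                                .build())
                        .build())
                .build();

        // Require the recovered input to show no failover conditions for 3 seconds
        // before MediaLive switches back to the preferred (primary) input.
        AutomaticInputFailoverSettings failover = AutomaticInputFailoverSettings.builder()
                .failoverConditions(inputLoss)
                .errorClearTimeMsec(3000)
                .inputPreference(InputPreference.PRIMARY_INPUT_PREFERRED)
                .build();

        System.out.println(failover);
    }
}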
+ }, "InputLossImageType": { "type": "string", "documentation": "Input Loss Image Type", @@ -9155,6 +9559,62 @@ }, "documentation": "Placeholder documentation for ListChannelsResultModel" }, + "ListInputDeviceTransfersRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "TransferType": { + "shape": "__string", + "location": "querystring", + "locationName": "transferType" + } + }, + "required": [ + "TransferType" + ], + "documentation": "Placeholder documentation for ListInputDeviceTransfersRequest" + }, + "ListInputDeviceTransfersResponse": { + "type": "structure", + "members": { + "InputDeviceTransfers": { + "shape": "__listOfTransferringInputDeviceSummary", + "locationName": "inputDeviceTransfers", + "documentation": "The list of devices that you are transferring or are being transferred to you." + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token to get additional list results." + } + }, + "documentation": "Placeholder documentation for ListInputDeviceTransfersResponse" + }, + "ListInputDeviceTransfersResultModel": { + "type": "structure", + "members": { + "InputDeviceTransfers": { + "shape": "__listOfTransferringInputDeviceSummary", + "locationName": "inputDeviceTransfers", + "documentation": "The list of devices that you are transferring or are being transferred to you." + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token to get additional list results." + } + }, + "documentation": "The list of input devices in the transferred state. The recipient hasn't yet accepted or rejected the transfer." + }, "ListInputDevicesRequest": { "type": "structure", "members": { @@ -11504,6 +11964,27 @@ }, "documentation": "Rec709 Settings" }, + "RejectInputDeviceTransferRequest": { + "type": "structure", + "members": { + "InputDeviceId": { + "shape": "__string", + "location": "uri", + "locationName": "inputDeviceId", + "documentation": "The unique ID of the input device to reject. For example, hd-123456789abcdef." + } + }, + "required": [ + "InputDeviceId" + ], + "documentation": "Placeholder documentation for RejectInputDeviceTransferRequest" + }, + "RejectInputDeviceTransferResponse": { + "type": "structure", + "members": { + }, + "documentation": "Placeholder documentation for RejectInputDeviceTransferResponse" + }, "RemixSettings": { "type": "structure", "members": { @@ -11765,6 +12246,13 @@ }, "documentation": "Placeholder documentation for ResourceNotFound" }, + "RtmpAdMarkers": { + "type": "string", + "documentation": "Rtmp Ad Markers", + "enum": [ + "ON_CUE_POINT_SCTE35" + ] + }, "RtmpCacheFullBehavior": { "type": "string", "documentation": "Rtmp Cache Full Behavior", @@ -11791,6 +12279,11 @@ "RtmpGroupSettings": { "type": "structure", "members": { + "AdMarkers": { + "shape": "__listOfRtmpAdMarkers", + "locationName": "adMarkers", + "documentation": "Choose the ad marker type for this output group. MediaLive will create a message based on the content of each SCTE-35 message, format it for that marker type, and insert it in the datastream." 
+ }, "AuthenticationScheme": { "shape": "AuthenticationScheme", "locationName": "authenticationScheme", @@ -12968,6 +13461,17 @@ "STRENGTH_16" ] }, + "ThumbnailData": { + "type": "structure", + "members": { + "Body": { + "shape": "__string", + "locationName": "body", + "documentation": "The binary data for the thumbnail that the Link device has most recently sent to MediaLive." + } + }, + "documentation": "The binary data for the thumbnail that the Link device has most recently sent to MediaLive." + }, "TimecodeConfig": { "type": "structure", "members": { @@ -13010,6 +13514,79 @@ }, "documentation": "Placeholder documentation for TooManyRequestsException" }, + "TransferInputDevice": { + "type": "structure", + "members": { + "TargetCustomerId": { + "shape": "__string", + "locationName": "targetCustomerId", + "documentation": "The AWS account ID (12 digits) for the recipient of the device transfer." + }, + "TransferMessage": { + "shape": "__string", + "locationName": "transferMessage", + "documentation": "An optional message for the recipient. Maximum 280 characters." + } + }, + "documentation": "The transfer details of the input device." + }, + "TransferInputDeviceRequest": { + "type": "structure", + "members": { + "InputDeviceId": { + "shape": "__string", + "location": "uri", + "locationName": "inputDeviceId", + "documentation": "The unique ID of this input device. For example, hd-123456789abcdef." + }, + "TargetCustomerId": { + "shape": "__string", + "locationName": "targetCustomerId", + "documentation": "The AWS account ID (12 digits) for the recipient of the device transfer." + }, + "TransferMessage": { + "shape": "__string", + "locationName": "transferMessage", + "documentation": "An optional message for the recipient. Maximum 280 characters." + } + }, + "documentation": "A request to transfer an input device.", + "required": [ + "InputDeviceId" + ] + }, + "TransferInputDeviceResponse": { + "type": "structure", + "members": { + }, + "documentation": "Placeholder documentation for TransferInputDeviceResponse" + }, + "TransferringInputDeviceSummary": { + "type": "structure", + "members": { + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "The unique ID of the input device." + }, + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "The optional message that the sender has attached to the transfer." + }, + "TargetCustomerId": { + "shape": "__string", + "locationName": "targetCustomerId", + "documentation": "The AWS account ID for the recipient of the input device transfer." + }, + "TransferType": { + "shape": "InputDeviceTransferType", + "locationName": "transferType", + "documentation": "The type (direction) of the input device transfer." + } + }, + "documentation": "Details about the input device that is being transferred." + }, "TtmlDestinationSettings": { "type": "structure", "members": { @@ -13392,6 +13969,11 @@ "locationName": "deviceSettingsSyncState", "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." }, + "DeviceUpdateStatus": { + "shape": "DeviceUpdateStatus", + "locationName": "deviceUpdateStatus", + "documentation": "The status of software on the input device." 
+ }, "HdDeviceSettings": { "shape": "InputDeviceHdSettings", "locationName": "hdDeviceSettings", @@ -14071,6 +14653,11 @@ "min": 1, "documentation": "Placeholder documentation for __integerMin1" }, + "__integerMin100": { + "type": "integer", + "min": 100, + "documentation": "Placeholder documentation for __integerMin100" + }, "__integerMin1000": { "type": "integer", "min": 1000, @@ -14319,6 +14906,13 @@ }, "documentation": "Placeholder documentation for __listOfChannelSummary" }, + "__listOfFailoverCondition": { + "type": "list", + "member": { + "shape": "FailoverCondition" + }, + "documentation": "Placeholder documentation for __listOfFailoverCondition" + }, "__listOfHlsAdMarkers": { "type": "list", "member": { @@ -14522,6 +15116,13 @@ }, "documentation": "Placeholder documentation for __listOfReservation" }, + "__listOfRtmpAdMarkers": { + "type": "list", + "member": { + "shape": "RtmpAdMarkers" + }, + "documentation": "Placeholder documentation for __listOfRtmpAdMarkers" + }, "__listOfScheduleAction": { "type": "list", "member": { @@ -14536,6 +15137,13 @@ }, "documentation": "Placeholder documentation for __listOfScte35Descriptor" }, + "__listOfTransferringInputDeviceSummary": { + "type": "list", + "member": { + "shape": "TransferringInputDeviceSummary" + }, + "documentation": "Placeholder documentation for __listOfTransferringInputDeviceSummary" + }, "__listOfValidationError": { "type": "list", "member": { diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 9882ed739507..e2d9cf3ae7b4 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackage/src/main/resources/codegen-resources/customization.config b/services/mediapackage/src/main/resources/codegen-resources/customization.config index 80aeab17626a..ee587bd61c35 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/customization.config +++ b/services/mediapackage/src/main/resources/codegen-resources/customization.config @@ -5,5 +5,10 @@ ], "deprecatedOperations": [ "RotateChannelCredentials" - ] + ], + "renameShapes": { + // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. + "__AdTriggersElement": "AdTriggersElement", + "__PeriodTriggersElement": "PeriodTriggersElement" + } } diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 43d7bb95f9eb..5cadbdec5752 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediapackagevod/src/main/resources/codegen-resources/customization.config b/services/mediapackagevod/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..3a8d27a41544 --- /dev/null +++ b/services/mediapackagevod/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,6 @@ +{ + "renameShapes": { + // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. 
+ "__PeriodTriggersElement": "PeriodTriggersElement" + } +} diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index 325f12a3cf93..facc1dc1195c 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index bb50e4872289..e4c61f1ba0b8 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index 770c32bbcfca..af1e304473ff 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/mediatailor/src/main/resources/codegen-resources/service-2.json b/services/mediatailor/src/main/resources/codegen-resources/service-2.json index 5c0ddda05cdd..53164a8d54f8 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/service-2.json +++ b/services/mediatailor/src/main/resources/codegen-resources/service-2.json @@ -141,6 +141,16 @@ } }, "shapes": { + "AdMarkerPassthrough" : { + "type" : "structure", + "members" : { + "Enabled" : { + "documentation": "

For HLS, when set to true, MediaTailor passes through EXT-X-CUE-IN, EXT-X-CUE-OUT, and EXT-X-SPLICEPOINT-SCTE35 ad markers from the origin manifest to the MediaTailor personalized manifest. No logic is applied to these ad markers. For example, if EXT-X-CUE-OUT has a value of 60, but no ads are filled for that ad break, MediaTailor will not set the value to 0.", "shape" : "__boolean" + } + }, + "documentation" : "The configuration for Ad Marker Passthrough. Ad marker passthrough can be used to pass ad markers from the origin to the customized manifest." + }, "AvailSuppression": { "type": "structure", "documentation" : "The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.", @@ -294,6 +304,10 @@ "shape" : "LivePreRollConfiguration", "documentation" : "The configuration for pre-roll ad insertion." }, + "ManifestProcessingRules": { + "shape" : "ManifestProcessingRules", + "documentation" : "The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor." + }, "Name": { "documentation": "The identifier for the playback configuration.", "shape": "__string" @@ -405,6 +419,15 @@ ], "type": "string" },
+ "ManifestProcessingRules" : { + "type" : "structure", + "documentation" : "The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor.", + "members" : { + "AdMarkerPassthrough" : { + "shape" : "AdMarkerPassthrough" + } + } + }, "Mode": { "enum": [ "OFF", @@ -439,6 +462,10 @@ "documentation": "The configuration for HLS content.", "shape": "HlsConfiguration" }, + "ManifestProcessingRules": { + "shape" : "ManifestProcessingRules", + "documentation" : "The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor." }, "Name": { "documentation": "The identifier for the playback configuration.", "shape": "__string" @@ -519,6 +546,10 @@ "shape" : "LivePreRollConfiguration", "documentation" : "The configuration for pre-roll ad insertion." },
+ "ManifestProcessingRules": { + "shape" : "ManifestProcessingRules", + "documentation": "The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor." + }, "Name": { "documentation": "The identifier for the playback configuration.", "shape": "__string" @@ -577,6 +608,10 @@ "shape" : "LivePreRollConfiguration", "documentation" : "The configuration for pre-roll ad insertion." }, + "ManifestProcessingRules": { + "shape" : "ManifestProcessingRules", + "documentation": "The configuration for manifest processing rules. Manifest processing rules enable customization of the personalized manifests created by MediaTailor." + }, "Name": { "documentation": "The identifier for the playback configuration.
", "shape": "__string" diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index 3104fc4241e4..de8f4df17f05 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index d84e2150b012..31c26cfbd7a4 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index 1941b8d53a6f..4d6108d8b97b 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 3f6a8442fd36..e960925f053a 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 mq diff --git a/services/mq/src/main/resources/codegen-resources/service-2.json b/services/mq/src/main/resources/codegen-resources/service-2.json index 563b32376a3a..790b077e86b2 100644 --- a/services/mq/src/main/resources/codegen-resources/service-2.json +++ b/services/mq/src/main/resources/codegen-resources/service-2.json @@ -725,7 +725,7 @@ "ConsoleURL" : { "shape" : "__string", "locationName" : "consoleURL", - "documentation" : "The URL of the broker's ActiveMQ Web Console." + "documentation" : "The URL of the broker's Web Console." }, "Endpoints" : { "shape" : "__listOf__string", @@ -735,7 +735,7 @@ "IpAddress" : { "shape" : "__string", "locationName" : "ipAddress", - "documentation" : "The IP address of the Elastic Network Interface (ENI) attached to the broker." + "documentation" : "The IP address of the Elastic Network Interface (ENI) attached to the broker. Does not apply to RabbitMQ brokers" } }, "documentation" : "Returns information about all brokers." @@ -804,7 +804,7 @@ }, "BrokerStorageType" : { "type" : "string", - "documentation" : "The storage type of the broker.", + "documentation" : "The broker's storage type. EFS is currently not Supported for RabbitMQ engine type.", "enum" : [ "EBS", "EFS" ] }, "BrokerSummary" : { @@ -840,6 +840,11 @@ "locationName" : "deploymentMode", "documentation" : "Required. The deployment mode of the broker." }, + "EngineType" : { + "shape" : "EngineType", + "locationName" : "engineType", + "documentation" : "Required. The type of broker engine." + }, "HostInstanceType" : { "shape" : "__string", "locationName" : "hostInstanceType", @@ -879,7 +884,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -923,7 +928,7 @@ "documentation" : "The revision number of the configuration." } }, - "documentation" : "A list of information about the configuration." + "documentation" : "A list of information about the configuration. Does not apply to RabbitMQ brokers." 
}, "ConfigurationRevision" : { "type" : "structure", @@ -1029,7 +1034,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -1074,7 +1079,7 @@ "SubnetIds" : { "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." + "documentation" : "The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment (ACTIVEMQ) requires two subnets. A CLUSTER_MULTI_AZ deployment (RABBITMQ) has no subnet requirements when deployed with public accessibility, deployment without public accessibility requires at least one subnet." }, "Tags" : { "shape" : "__mapOf__string", @@ -1084,7 +1089,7 @@ "Users" : { "shape" : "__listOfUser", "locationName" : "users", - "documentation" : "Required. The list of ActiveMQ users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." + "documentation" : "Required. The list of broker users (persons or applications) who can access queues and topics. For RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ Web Console. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." } }, "documentation" : "Required. The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" @@ -1147,7 +1152,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -1192,7 +1197,7 @@ "SubnetIds" : { "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." + "documentation" : "The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment (ACTIVEMQ) requires two subnets. 
A CLUSTER_MULTI_AZ deployment (RABBITMQ) has no subnet requirements when deployed with public accessibility, deployment without public accessibility requires at least one subnet." }, "Tags" : { "shape" : "__mapOf__string", @@ -1202,7 +1207,7 @@ "Users" : { "shape" : "__listOfUser", "locationName" : "users", - "documentation" : "Required. The list of ActiveMQ users (persons or applications) who can access queues and topics. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." + "documentation" : "Required. The list of broker users (persons or applications) who can access queues and topics. For RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ Web Console. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." } }, "documentation" : "Creates a broker using the specified properties." @@ -1233,7 +1238,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -1300,7 +1305,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -1512,7 +1517,7 @@ "DeploymentMode" : { "type" : "string", "documentation" : "The deployment mode of the broker.", - "enum" : [ "SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ" ] + "enum" : [ "SINGLE_INSTANCE", "ACTIVE_STANDBY_MULTI_AZ", "CLUSTER_MULTI_AZ" ] }, "DescribeBrokerEngineTypesRequest" : { "type" : "structure", @@ -1673,7 +1678,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -1743,7 +1748,7 @@ "SubnetIds" : { "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." + "documentation" : "The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment (ACTIVEMQ) requires two subnets. A CLUSTER_MULTI_AZ deployment (RABBITMQ) has no subnet requirements when deployed with public accessibility, deployment without public accessibility requires at least one subnet." 
}, "Tags" : { "shape" : "__mapOf__string", @@ -1753,7 +1758,7 @@ "Users" : { "shape" : "__listOfUserSummary", "locationName" : "users", - "documentation" : "The list of all ActiveMQ usernames for the specified broker." + "documentation" : "The list of all broker usernames for the specified broker." } }, "documentation" : "The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html" @@ -1831,7 +1836,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -1901,7 +1906,7 @@ "SubnetIds" : { "shape" : "__listOf__string", "locationName" : "subnetIds", - "documentation" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets." + "documentation" : "The list of groups that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment (ACTIVEMQ) requires two subnets. A CLUSTER_MULTI_AZ deployment (RABBITMQ) has no subnet requirements when deployed with public accessibility, deployment without public accessibility requires at least one subnet." }, "Tags" : { "shape" : "__mapOf__string", @@ -1911,7 +1916,7 @@ "Users" : { "shape" : "__listOfUserSummary", "locationName" : "users", - "documentation" : "The list of all ActiveMQ usernames for the specified broker." + "documentation" : "The list of all broker usernames for the specified broker." } } }, @@ -1953,7 +1958,7 @@ "EngineType" : { "shape" : "EngineType", "locationName" : "engineType", - "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports only ACTIVEMQ." + "documentation" : "Required. The type of broker engine. Note: Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ." }, "EngineVersion" : { "shape" : "__string", @@ -2149,8 +2154,8 @@ }, "EngineType" : { "type" : "string", - "documentation" : "The type of broker engine. Note: Currently, Amazon MQ supports only ActiveMQ.", - "enum" : [ "ACTIVEMQ" ] + "documentation" : "The type of broker engine. Note: Currently, Amazon MQ supports ActiveMQ and RabbitMQ.", + "enum" : [ "ACTIVEMQ", "RABBITMQ" ] }, "EngineVersion" : { "type" : "structure", @@ -2620,7 +2625,7 @@ "Audit" : { "shape" : "__boolean", "locationName" : "audit", - "documentation" : "Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged." + "documentation" : "Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged. Does not apply to RabbitMQ brokers." }, "General" : { "shape" : "__boolean", @@ -3150,7 +3155,7 @@ "ConsoleAccess" : { "shape" : "__boolean", "locationName" : "consoleAccess", - "documentation" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user." + "documentation" : "Enables access to the ActiveMQ Web Console for the ActiveMQ user (Does not apply to RabbitMQ brokers)." 
}, "Groups" : { "shape" : "__listOf__string", @@ -3160,15 +3165,15 @@ "Password" : { "shape" : "__string", "locationName" : "password", - "documentation" : "Required. The password of the ActiveMQ user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas." + "documentation" : "Required. The password of the broker user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas." }, "Username" : { "shape" : "__string", "locationName" : "username", - "documentation" : "Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." + "documentation" : "Required. The username of the broker user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." } }, - "documentation" : "An ActiveMQ user associated with the broker." + "documentation" : "A user associated with the broker." }, "UserPendingChanges" : { "type" : "structure", @@ -3197,15 +3202,15 @@ "PendingChange" : { "shape" : "ChangeType", "locationName" : "pendingChange", - "documentation" : "The type of change pending for the ActiveMQ user." + "documentation" : "The type of change pending for the broker user." }, "Username" : { "shape" : "__string", "locationName" : "username", - "documentation" : "Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." + "documentation" : "Required. The username of the broker user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long." } }, - "documentation" : "Returns a list of all ActiveMQ users." + "documentation" : "Returns a list of all broker users." }, "WeeklyStartTime" : { "type" : "structure", @@ -3360,5 +3365,5 @@ } } }, - "documentation" : "Amazon MQ is a managed message broker service for Apache ActiveMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols." + "documentation" : "Amazon MQ is a managed message broker service for Apache ActiveMQ and RabbitMQ that makes it easy to set up and operate message brokers in the cloud. A message broker allows software applications and components to communicate using various programming languages, operating systems, and formal messaging protocols." } \ No newline at end of file diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index 2b278ed1211d..7713239e32f2 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml new file mode 100644 index 000000000000..eb1fcc488082 --- /dev/null +++ b/services/mwaa/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + mwaa + AWS Java SDK :: Services :: MWAA + The AWS Java SDK for MWAA module holds the client classes that are used for + communicating with MWAA. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.mwaa + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/mwaa/src/main/resources/codegen-resources/paginators-1.json b/services/mwaa/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5e218e4616bb --- /dev/null +++ b/services/mwaa/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListEnvironments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Environments" + } + } +} diff --git a/services/mwaa/src/main/resources/codegen-resources/service-2.json b/services/mwaa/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..92e6ba9bf7ad --- /dev/null +++ b/services/mwaa/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1207 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-01", + "endpointPrefix":"airflow", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AmazonMWAA", + "serviceId":"MWAA", + "signatureVersion":"v4", + "signingName":"airflow", + "uid":"mwaa-2020-07-01" + }, + "operations":{ + "CreateCliToken":{ + "name":"CreateCliToken", + "http":{ + "method":"POST", + "requestUri":"/clitoken/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateCliTokenRequest"}, + "output":{"shape":"CreateCliTokenResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Create a CLI token to use the Airflow CLI.

", + "endpoint":{"hostPrefix":"env."} + }, + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"PUT", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateEnvironmentInput"}, + "output":{"shape":"CreateEnvironmentOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

JSON blob that describes the environment to create.

", + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "CreateWebLoginToken":{ + "name":"CreateWebLoginToken", + "http":{ + "method":"POST", + "requestUri":"/webtoken/{Name}", + "responseCode":200 + }, + "input":{"shape":"CreateWebLoginTokenRequest"}, + "output":{"shape":"CreateWebLoginTokenResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Create a JWT token to be used to log in to the Airflow Web UI with claims-based authentication.

", + "endpoint":{"hostPrefix":"env."}, + "idempotent":true + }, + "DeleteEnvironment":{ + "name":"DeleteEnvironment", + "http":{ + "method":"DELETE", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"DeleteEnvironmentInput"}, + "output":{"shape":"DeleteEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Delete an existing environment.

", + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "GetEnvironment":{ + "name":"GetEnvironment", + "http":{ + "method":"GET", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"GetEnvironmentInput"}, + "output":{"shape":"GetEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get details of an existing environment.

", + "endpoint":{"hostPrefix":"api."} + }, + "ListEnvironments":{ + "name":"ListEnvironments", + "http":{ + "method":"GET", + "requestUri":"/environments", + "responseCode":200 + }, + "input":{"shape":"ListEnvironmentsInput"}, + "output":{"shape":"ListEnvironmentsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List Amazon MWAA Environments.

", + "endpoint":{"hostPrefix":"api."} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List the tags for MWAA environments.

", + "endpoint":{"hostPrefix":"api."} + }, + "PublishMetrics":{ + "name":"PublishMetrics", + "http":{ + "method":"POST", + "requestUri":"/metrics/environments/{EnvironmentName}", + "responseCode":200 + }, + "input":{"shape":"PublishMetricsInput"}, + "output":{"shape":"PublishMetricsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

An operation for publishing metrics from customers to the Ops plane.

", + "endpoint":{"hostPrefix":"ops."} + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Add a tag to the MWAA environments.

", + "endpoint":{"hostPrefix":"api."} + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Remove a tag from the MWAA environments.

", + "endpoint":{"hostPrefix":"api."}, + "idempotent":true + }, + "UpdateEnvironment":{ + "name":"UpdateEnvironment", + "http":{ + "method":"PATCH", + "requestUri":"/environments/{Name}", + "responseCode":200 + }, + "input":{"shape":"UpdateEnvironmentInput"}, + "output":{"shape":"UpdateEnvironmentOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Update an MWAA environment.

", + "endpoint":{"hostPrefix":"api."} + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

Access to the Airflow Web UI or CLI has been denied. Please follow the MWAA user guide to set up permissions to access the Web UI and CLI functionality.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"} + }, + "AirflowVersion":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[0-9a-z.]+$" + }, + "CloudWatchLogGroupArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:logs:[a-z0-9\\-]+:\\d{12}:log-group:\\w+" + }, + "ConfigKey":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-z]+([a-z._]*[a-z]+)?$" + }, + "ConfigValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*" + }, + "CreateCliTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

Create a CLI token request for an MWAA environment.

", + "location":"uri", + "locationName":"Name" + } + } + }, + "CreateCliTokenResponse":{ + "type":"structure", + "members":{ + "CliToken":{ + "shape":"SyntheticCreateCliTokenResponseToken", + "documentation":"

Create an Airflow CLI login token response for the provided JWT token.

" + }, + "WebServerHostname":{ + "shape":"Hostname", + "documentation":"

Create an Airflow CLI login token response for the provided webserver hostname.

" + } + } + }, + "CreateEnvironmentInput":{ + "type":"structure", + "required":[ + "DagS3Path", + "ExecutionRoleArn", + "Name", + "NetworkConfiguration", + "SourceBucketArn" + ], + "members":{ + "AirflowConfigurationOptions":{ + "shape":"SyntheticCreateEnvironmentInputAirflowConfigurationOptions", + "documentation":"

The Apache Airflow configuration settings you want to override in your environment. For more information, see Environment configuration.

" + }, + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

The Apache Airflow version you want to use for your environment.

" + }, + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see Importing DAGs on Amazon MWAA.

" + }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

The environment class you want to use for your environment. The environment class determines the size of the containers and database used for your Apache Airflow services.

" + }, + "ExecutionRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an AWS Identity and Access Management (IAM) role that grants MWAA permission to access AWS services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Managing access to Amazon Managed Workflows for Apache Airflow.

" + }, + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

The AWS Key Management Service (KMS) key to encrypt and decrypt the data in your environment. You can use an AWS KMS key managed by MWAA, or a custom KMS key (advanced). For more information, see Customer master keys (CMKs) in the AWS KMS developer guide.

" + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

The Apache Airflow logs you want to send to Amazon CloudWatch Logs.

" + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers and the Fargate containers that run your tasks up to the number you specify in this field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra containers, leaving the one worker that is included with your environment.

" + }, + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of your MWAA environment.

", + "location":"uri", + "locationName":"Name" + }, + "NetworkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

The VPC networking components you want to use for your environment. At least two private subnet identifiers and one VPC security group identifier are required to create an environment. For more information, see Creating the VPC network for an MWAA environment.

" + }, + "PluginsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The plugins.zip file version you want to use.

" + }, + "PluginsS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then PluginsS3ObjectVersion is required. For more information, see Importing DAGs on Amazon MWAA.

" + }, + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The requirements.txt file version you want to use.

" + }, + "RequirementsS3Path":{ + "shape":"RelativePath", + "documentation":"

The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then RequirementsS3ObjectVersion is required. For more information, see Importing DAGs on Amazon MWAA.

" + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The metadata tags you want to attach to your environment. For more information, see Tagging AWS resources.

" + }, + "WebserverAccessMode":{ + "shape":"WebserverAccessMode", + "documentation":"

The networking access of your Apache Airflow web server. A public network allows your Airflow UI to be accessed over the Internet by users granted access in your IAM policy. A private network limits access to your Airflow UI to users within your VPC. For more information, see Creating the VPC network for an MWAA environment.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The day and time you want MWAA to start weekly maintenance updates on your environment.

" + } + }, + "documentation":"

This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation to create an environment. For more information, see Get started with Amazon Managed Workflows for Apache Airflow.

" + }, + "CreateEnvironmentOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"EnvironmentArn", + "documentation":"

The resulting Amazon MWAA environment ARN.

" + } + } + }, + "CreateWebLoginTokenRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

Create an Airflow Web UI login token request for an MWAA environment.

", + "location":"uri", + "locationName":"Name" + } + } + }, + "CreateWebLoginTokenResponse":{ + "type":"structure", + "members":{ + "WebServerHostname":{ + "shape":"Hostname", + "documentation":"

Create an Airflow Web UI login token response for the provided webserver hostname.

" + }, + "WebToken":{ + "shape":"SyntheticCreateWebLoginTokenResponseToken", + "documentation":"

Create an Airflow Web UI login token response for the provided JWT token.

" + } + } + }, + "CreatedAt":{"type":"timestamp"}, + "DeleteEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of the environment to delete.

", + "location":"uri", + "locationName":"Name" + } + } + }, + "DeleteEnvironmentOutput":{ + "type":"structure", + "members":{ + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Internal only API.

" + }, + "Value":{ + "shape":"String", + "documentation":"

Internal only API.

" + } + }, + "documentation":"

Internal only API.

" + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"} + }, + "Double":{ + "type":"double", + "box":true + }, + "Environment":{ + "type":"structure", + "members":{ + "AirflowConfigurationOptions":{ + "shape":"AirflowConfigurationOptions", + "documentation":"

The Airflow Configuration Options of the Amazon MWAA Environment.

" + }, + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

The Airflow Version of the Amazon MWAA Environment.

" + }, + "Arn":{ + "shape":"EnvironmentArn", + "documentation":"

The ARN of the Amazon MWAA Environment.

" + }, + "CreatedAt":{ + "shape":"CreatedAt", + "documentation":"

The Created At date of the Amazon MWAA Environment.

" + }, + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

The Dags S3 Path of the Amazon MWAA Environment.

" + }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

The Environment Class (size) of the Amazon MWAA Environment.

" + }, + "ExecutionRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Execution Role ARN of the Amazon MWAA Environment.

" + }, + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

The Kms Key of the Amazon MWAA Environment.

" + }, + "LastUpdate":{"shape":"LastUpdate"}, + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

The Logging Configuration of the Amazon MWAA Environment.

" + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

The Maximum Workers of the Amazon MWAA Environment.

" + }, + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of the Amazon MWAA Environment.

" + }, + "NetworkConfiguration":{"shape":"NetworkConfiguration"}, + "PluginsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The Plugins.zip S3 Object Version of the Amazon MWAA Environment.

" + }, + "PluginsS3Path":{ + "shape":"RelativePath", + "documentation":"

The Plugins.zip S3 Path of the Amazon MWAA Environment.

" + }, + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The Requirements.txt file S3 Object Version of the Amazon MWAA Environment.

" + }, + "RequirementsS3Path":{ + "shape":"RelativePath", + "documentation":"

The Requirements.txt S3 Path of the Amazon MWAA Environment.

" + }, + "ServiceRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Service Role ARN of the Amazon MWAA Environment.

" + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The Source S3 Bucket ARN of the Amazon MWAA Environment.

" + }, + "Status":{ + "shape":"EnvironmentStatus", + "documentation":"

The status of the Amazon MWAA Environment.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The Tags of the Amazon MWAA Environment.

" + }, + "WebserverAccessMode":{ + "shape":"WebserverAccessMode", + "documentation":"

The Webserver Access Mode of the Amazon MWAA Environment (public or private only).

" + }, + "WebserverUrl":{ + "shape":"WebserverUrl", + "documentation":"

The Webserver URL of the Amazon MWAA Environment.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The Weekly Maintenance Window Start of the Amazon MWAA Environment.

" + } + }, + "documentation":"

An Amazon MWAA environment.

" + }, + "EnvironmentArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:airflow:[a-z0-9\\-]+:\\d{12}:environment/\\w+" + }, + "EnvironmentClass":{ + "type":"string", + "max":1024, + "min":1 + }, + "EnvironmentList":{ + "type":"list", + "member":{"shape":"EnvironmentName"} + }, + "EnvironmentName":{ + "type":"string", + "max":80, + "min":1, + "pattern":"^[a-zA-Z][0-9a-zA-Z-_]*$" + }, + "EnvironmentStatus":{ + "type":"string", + "enum":[ + "CREATING", + "CREATE_FAILED", + "AVAILABLE", + "UPDATING", + "DELETING", + "DELETED" + ] + }, + "ErrorCode":{"type":"string"}, + "ErrorMessage":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.+$" + }, + "GetEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of the environment to retrieve.

", + "location":"uri", + "locationName":"Name" + } + } + }, + "GetEnvironmentOutput":{ + "type":"structure", + "members":{ + "Environment":{ + "shape":"Environment", + "documentation":"

A JSON blob with environment details.

" + } + } + }, + "Hostname":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])$" + }, + "IamRoleArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

InternalServerException: An internal error has occurred.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KmsKey":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^(((arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?key\\/)?[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}|(arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?alias/.+)$" + }, + "LastUpdate":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"UpdateCreatedAt", + "documentation":"

Time that last update occurred.

" + }, + "Error":{ + "shape":"UpdateError", + "documentation":"

Error string of last update, if applicable.

" + }, + "Status":{ + "shape":"UpdateStatus", + "documentation":"

The status of the last update. One of: SUCCESS, FAILED, CREATING, DELETING.

" + } + }, + "documentation":"

Last update information for the environment.

" + }, + "ListEnvironmentsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListEnvironmentsInputMaxResultsInteger", + "documentation":"

The maximum results when listing MWAA environments.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The Next Token when listing MWAA environments.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListEnvironmentsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListEnvironmentsOutput":{ + "type":"structure", + "required":["Environments"], + "members":{ + "Environments":{ + "shape":"EnvironmentList", + "documentation":"

The list of Amazon MWAA Environments.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The Next Token when listing MWAA environments.

" + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "documentation":"

The ARN of the MWAA environment.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags of the MWAA environments.

" + } + } + }, + "LoggingConfiguration":{ + "type":"structure", + "members":{ + "DagProcessingLogs":{"shape":"ModuleLoggingConfiguration"}, + "SchedulerLogs":{"shape":"ModuleLoggingConfiguration"}, + "TaskLogs":{"shape":"ModuleLoggingConfiguration"}, + "WebserverLogs":{"shape":"ModuleLoggingConfiguration"}, + "WorkerLogs":{"shape":"ModuleLoggingConfiguration"} + }, + "documentation":"

The Logging Configuration of your Amazon MWAA environment.

" + }, + "LoggingConfigurationInput":{ + "type":"structure", + "members":{ + "DagProcessingLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "SchedulerLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "TaskLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "WebserverLogs":{"shape":"ModuleLoggingConfigurationInput"}, + "WorkerLogs":{"shape":"ModuleLoggingConfigurationInput"} + }, + "documentation":"

The Logging Configuration of your Amazon MWAA environment.

" + }, + "LoggingEnabled":{ + "type":"boolean", + "box":true + }, + "LoggingLevel":{ + "type":"string", + "enum":[ + "CRITICAL", + "ERROR", + "WARNING", + "INFO", + "DEBUG" + ] + }, + "MaxWorkers":{ + "type":"integer", + "box":true, + "min":1 + }, + "MetricData":{ + "type":"list", + "member":{"shape":"MetricDatum"} + }, + "MetricDatum":{ + "type":"structure", + "required":[ + "MetricName", + "Timestamp" + ], + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

Internal only API.

" + }, + "MetricName":{ + "shape":"String", + "documentation":"

Internal only API.

" + }, + "StatisticValues":{ + "shape":"StatisticSet", + "documentation":"

Internal only API.

" + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

Internal only API.

" + }, + "Unit":{"shape":"Unit"}, + "Value":{ + "shape":"Double", + "documentation":"

Internal only API.

" + } + }, + "documentation":"

Internal only API.

" + }, + "ModuleLoggingConfiguration":{ + "type":"structure", + "members":{ + "CloudWatchLogGroupArn":{ + "shape":"CloudWatchLogGroupArn", + "documentation":"

Provides the ARN for the CloudWatch group where the logs will be published.

" + }, + "Enabled":{ + "shape":"LoggingEnabled", + "documentation":"

Defines that the logging module is enabled.

" + }, + "LogLevel":{ + "shape":"LoggingLevel", + "documentation":"

Defines the log level, which can be CRITICAL, ERROR, WARNING, INFO, or DEBUG.

" + } + }, + "documentation":"

A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.

" + }, + "ModuleLoggingConfigurationInput":{ + "type":"structure", + "required":[ + "Enabled", + "LogLevel" + ], + "members":{ + "Enabled":{ + "shape":"LoggingEnabled", + "documentation":"

Defines that the logging module is enabled.

" + }, + "LogLevel":{ + "shape":"LoggingLevel", + "documentation":"

Defines the log level, which can be CRITICAL, ERROR, WARNING, INFO, or DEBUG.

" + } + }, + "documentation":"

A JSON blob that provides configuration to use for logging with respect to the various Apache Airflow services: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, and WorkerLogs.

" + }, + "NetworkConfiguration":{ + "type":"structure", + "members":{ + "SecurityGroupIds":{ + "shape":"SecurityGroupList", + "documentation":"

A JSON list of 1 or more security group IDs by name, in the same VPC as the subnets.

" + }, + "SubnetIds":{ + "shape":"SubnetList", + "documentation":"

Provide a JSON list of 2 subnet IDs by name. These must be private subnets, in the same VPC, in two different Availability Zones.

" + } + }, + "documentation":"

Provide the security group and subnet IDs for the workers and scheduler.

" + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":0 + }, + "PublishMetricsInput":{ + "type":"structure", + "required":[ + "EnvironmentName", + "MetricData" + ], + "members":{ + "EnvironmentName":{ + "shape":"EnvironmentName", + "documentation":"

Publishes environment metric data to Amazon CloudWatch.

", + "location":"uri", + "locationName":"EnvironmentName" + }, + "MetricData":{ + "shape":"MetricData", + "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric.

" + } + } + }, + "PublishMetricsOutput":{ + "type":"structure", + "members":{ + } + }, + "RelativePath":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

ResourceNotFoundException: The resource is not available.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "S3BucketArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"^arn:aws(-[a-z]+)?:s3:::airflow-[a-z0-9.\\-]+$" + }, + "S3ObjectVersion":{ + "type":"string", + "max":1024, + "min":1 + }, + "SecurityGroupId":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^sg-[a-zA-Z0-9\\-._]+$" + }, + "SecurityGroupList":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":5, + "min":1 + }, + "StatisticSet":{ + "type":"structure", + "members":{ + "Maximum":{ + "shape":"Double", + "documentation":"

Internal only API.

" + }, + "Minimum":{ + "shape":"Double", + "documentation":"

Internal only API.

" + }, + "SampleCount":{ + "shape":"Integer", + "documentation":"

Internal only API.

" + }, + "Sum":{ + "shape":"Double", + "documentation":"

Internal only API.

" + } + }, + "documentation":"

Internal only API.

" + }, + "String":{"type":"string"}, + "SubnetId":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^subnet-[a-zA-Z0-9\\-._]+$" + }, + "SubnetList":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":2, + "min":2 + }, + "SyntheticCreateCliTokenResponseToken":{ + "type":"string", + "sensitive":true + }, + "SyntheticCreateEnvironmentInputAirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"}, + "sensitive":true + }, + "SyntheticCreateWebLoginTokenResponseToken":{ + "type":"string", + "sensitive":true + }, + "SyntheticUpdateEnvironmentInputAirflowConfigurationOptions":{ + "type":"map", + "key":{"shape":"ConfigKey"}, + "value":{"shape":"ConfigValue"}, + "sensitive":true + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "documentation":"

The tag resource ARN of the MWAA environments.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags to assign to the MWAA environment resource.

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Timestamp":{"type":"timestamp"}, + "Unit":{ + "type":"string", + "documentation":"

Unit

", + "enum":[ + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None" + ] + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "tagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"EnvironmentArn", + "documentation":"

The tag resource ARN of the MWAA environments.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag resource key of the MWAA environments.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateCreatedAt":{"type":"timestamp"}, + "UpdateEnvironmentInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "AirflowConfigurationOptions":{ + "shape":"SyntheticUpdateEnvironmentInputAirflowConfigurationOptions", + "documentation":"

The Airflow Configuration Options to update of your Amazon MWAA environment.

" + }, + "AirflowVersion":{ + "shape":"AirflowVersion", + "documentation":"

The Airflow Version to update of your Amazon MWAA environment.

" + }, + "DagS3Path":{ + "shape":"RelativePath", + "documentation":"

The Dags folder S3 Path to update of your Amazon MWAA environment.

" + }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

The Environment Class to update of your Amazon MWAA environment.

" + }, + "ExecutionRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Execution Role ARN to update of your Amazon MWAA environment.

" + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

The Logging Configuration to update of your Amazon MWAA environment.

" + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

The Maximum Workers to update of your Amazon MWAA environment.

" + }, + "Name":{ + "shape":"EnvironmentName", + "documentation":"

The name of your Amazon MWAA environment that you wish to update.

", + "location":"uri", + "locationName":"Name" + }, + "NetworkConfiguration":{ + "shape":"UpdateNetworkConfigurationInput", + "documentation":"

The Network Configuration to update of your Amazon MWAA environment.

" + }, + "PluginsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The Plugins.zip S3 Object Version to update of your Amazon MWAA environment.

" + }, + "PluginsS3Path":{ + "shape":"RelativePath", + "documentation":"

The Plugins.zip S3 Path to update of your Amazon MWAA environment.

" + }, + "RequirementsS3ObjectVersion":{ + "shape":"S3ObjectVersion", + "documentation":"

The Requirements.txt S3 Object Version to update of your Amazon MWAA environment.

" + }, + "RequirementsS3Path":{ + "shape":"RelativePath", + "documentation":"

The Requirements.txt S3 Path to update of your Amazon MWAA environment.

" + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

The S3 Source Bucket ARN to update of your Amazon MWAA environment.

" + }, + "WebserverAccessMode":{ + "shape":"WebserverAccessMode", + "documentation":"

The Webserver Access Mode to update of your Amazon MWAA environment.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The Weekly Maintenance Window Start to update of your Amazon MWAA environment.

" + } + } + }, + "UpdateEnvironmentOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"EnvironmentArn", + "documentation":"

The ARN to update of your Amazon MWAA environment.

" + } + } + }, + "UpdateError":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

Error code of update.

" + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

Error message of update.

" + } + }, + "documentation":"

Error information of update, if applicable.

" + }, + "UpdateNetworkConfigurationInput":{ + "type":"structure", + "required":["SecurityGroupIds"], + "members":{ + "SecurityGroupIds":{ + "shape":"SecurityGroupList", + "documentation":"

Provide a JSON list of 1 or more security group IDs by name, in the same VPC as the subnets.

" + } + }, + "documentation":"

Provide the security group and subnet IDs for the workers and scheduler.

" + }, + "UpdateStatus":{ + "type":"string", + "enum":[ + "SUCCESS", + "PENDING", + "FAILED" + ] + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

ValidationException: The provided input is not valid.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "WebserverAccessMode":{ + "type":"string", + "enum":[ + "PRIVATE_ONLY", + "PUBLIC_ONLY" + ] + }, + "WebserverUrl":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^https://.+$" + }, + "WeeklyMaintenanceWindowStart":{ + "type":"string", + "max":9, + "min":1, + "pattern":"(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30)" + } + }, + "documentation":"

Amazon Managed Workflows for Apache Airflow

This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

" +} diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 8263989edd6d..a1a5a7eb88cd 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/neptune/src/main/resources/codegen-resources/paginators-1.json b/services/neptune/src/main/resources/codegen-resources/paginators-1.json index 2a4588640a88..fddbddd2e111 100644 --- a/services/neptune/src/main/resources/codegen-resources/paginators-1.json +++ b/services/neptune/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,11 @@ { "pagination": { + "DescribeDBClusterEndpoints": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterEndpoints" + }, "DescribeDBEngineVersions": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/services/neptune/src/main/resources/codegen-resources/service-2.json b/services/neptune/src/main/resources/codegen-resources/service-2.json index ed8a484a390b..c586b8be6c0f 100644 --- a/services/neptune/src/main/resources/codegen-resources/service-2.json +++ b/services/neptune/src/main/resources/codegen-resources/service-2.json @@ -162,6 +162,27 @@ ], "documentation":"

Creates a new Amazon Neptune DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon Neptune DB instance.

Note that when you create a new cluster using CreateDBCluster directly, deletion protection is disabled by default (when you create a new production cluster in the console, deletion protection is enabled by default). You can only delete a DB cluster if its DeletionProtection field is set to false.

" }, + "CreateDBClusterEndpoint":{ + "name":"CreateDBClusterEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBClusterEndpointMessage"}, + "output":{ + "shape":"CreateDBClusterEndpointOutput", + "resultWrapper":"CreateDBClusterEndpointResult" + }, + "errors":[ + {"shape":"DBClusterEndpointQuotaExceededFault"}, + {"shape":"DBClusterEndpointAlreadyExistsFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

Creates a new custom endpoint and associates it with an Amazon Neptune DB cluster.

" + }, "CreateDBClusterParameterGroup":{ "name":"CreateDBClusterParameterGroup", "http":{ @@ -311,6 +332,24 @@ ], "documentation":"

The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

Note that the DB Cluster cannot be deleted if deletion protection is enabled. To delete it, you must first set its DeletionProtection field to False.

" }, + "DeleteDBClusterEndpoint":{ + "name":"DeleteDBClusterEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterEndpointMessage"}, + "output":{ + "shape":"DeleteDBClusterEndpointOutput", + "resultWrapper":"DeleteDBClusterEndpointResult" + }, + "errors":[ + {"shape":"InvalidDBClusterEndpointStateFault"}, + {"shape":"DBClusterEndpointNotFoundFault"}, + {"shape":"InvalidDBClusterStateFault"} + ], + "documentation":"

Deletes a custom endpoint and removes it from an Amazon Neptune DB cluster.

" + }, "DeleteDBClusterParameterGroup":{ "name":"DeleteDBClusterParameterGroup", "http":{ @@ -405,6 +444,22 @@ ], "documentation":"

Deletes an event notification subscription.

" }, + "DescribeDBClusterEndpoints":{ + "name":"DescribeDBClusterEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterEndpointsMessage"}, + "output":{ + "shape":"DBClusterEndpointMessage", + "resultWrapper":"DescribeDBClusterEndpointsResult" + }, + "errors":[ + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Returns information about endpoints for an Amazon Neptune DB cluster.

This operation can also return information for Amazon RDS clusters and Amazon DocDB clusters.

" + }, "DescribeDBClusterParameterGroups":{ "name":"DescribeDBClusterParameterGroups", "http":{ @@ -738,6 +793,26 @@ ], "documentation":"

Modify a setting for a DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.

" }, + "ModifyDBClusterEndpoint":{ + "name":"ModifyDBClusterEndpoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBClusterEndpointMessage"}, + "output":{ + "shape":"ModifyDBClusterEndpointOutput", + "resultWrapper":"ModifyDBClusterEndpointResult" + }, + "errors":[ + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidDBClusterEndpointStateFault"}, + {"shape":"DBClusterEndpointNotFoundFault"}, + {"shape":"DBInstanceNotFoundFault"}, + {"shape":"InvalidDBInstanceStateFault"} + ], + "documentation":"

Modifies the properties of an endpoint in an Amazon Neptune DB cluster.

" + }, "ModifyDBClusterParameterGroup":{ "name":"ModifyDBClusterParameterGroup", "http":{ @@ -1092,6 +1167,10 @@ "RoleArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the IAM role to associate with the Neptune DB cluster, for example arn:aws:iam::123456789012:role/NeptuneAccessRole.

" + }, + "FeatureName":{ + "shape":"String", + "documentation":"

The name of the feature for the Neptune DB cluster that the IAM role is to be associated with. For the list of supported feature names, see DBEngineVersion.

" } } }, @@ -1359,6 +1438,86 @@ "DBParameterGroup":{"shape":"DBParameterGroup"} } }, + "CreateDBClusterEndpointMessage":{ + "type":"structure", + "required":[ + "DBClusterIdentifier", + "DBClusterEndpointIdentifier", + "EndpointType" + ], + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier to use for the new endpoint. This parameter is stored as a lowercase string.

" + }, + "EndpointType":{ + "shape":"String", + "documentation":"

The type of the endpoint. One of: READER, WRITER, ANY.

" + }, + "StaticMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that are part of the custom endpoint group.

" + }, + "ExcludedMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the Amazon Neptune resource.

" + } + } + }, + "CreateDBClusterEndpointOutput":{ + "type":"structure", + "members":{ + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterEndpointResourceIdentifier":{ + "shape":"String", + "documentation":"

A unique system-generated identifier for an endpoint. It remains the same for the whole life of the endpoint.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The DNS address of the endpoint.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The current status of the endpoint. One of: creating, available, deleting, inactive, modifying. The inactive state applies to an endpoint that cannot be used for a certain kind of cluster, such as a writer endpoint for a read-only secondary cluster in a global database.

" + }, + "EndpointType":{ + "shape":"String", + "documentation":"

The type of the endpoint. One of: READER, WRITER, CUSTOM.

" + }, + "CustomEndpointType":{ + "shape":"String", + "documentation":"

The type associated with a custom endpoint. One of: READER, WRITER, ANY.

" + }, + "StaticMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that are part of the custom endpoint group.

" + }, + "ExcludedMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

" + }, + "DBClusterEndpointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the endpoint.

" + } + }, + "documentation":"

This data type represents the information you need to connect to an Amazon Neptune DB cluster. This data type is used as a response element in the following actions:

  • CreateDBClusterEndpoint

  • DescribeDBClusterEndpoints

  • ModifyDBClusterEndpoint

  • DeleteDBClusterEndpoint

For the data structure that represents Amazon Neptune DB instance endpoints, see Endpoint.

" + }, "CreateDBClusterMessage":{ "type":"structure", "required":[ @@ -1404,7 +1563,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to use. Currently, setting this parameter has no effect.

Example: 1.0.1

" + "documentation":"

The version number of the database engine to use for the new DB cluster.

Example: 1.0.2.1

" }, "Port":{ "shape":"IntegerOptional", @@ -1452,7 +1611,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

Default: false

" + "documentation":"

Not supported by Neptune.

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -1992,6 +2151,108 @@ }, "exception":true }, + "DBClusterEndpoint":{ + "type":"structure", + "members":{ + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterEndpointResourceIdentifier":{ + "shape":"String", + "documentation":"

A unique system-generated identifier for an endpoint. It remains the same for the whole life of the endpoint.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The DNS address of the endpoint.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The current status of the endpoint. One of: creating, available, deleting, inactive, modifying. The inactive state applies to an endpoint that cannot be used for a certain kind of cluster, such as a writer endpoint for a read-only secondary cluster in a global database.

" + }, + "EndpointType":{ + "shape":"String", + "documentation":"

The type of the endpoint. One of: READER, WRITER, CUSTOM.

" + }, + "CustomEndpointType":{ + "shape":"String", + "documentation":"

The type associated with a custom endpoint. One of: READER, WRITER, ANY.

" + }, + "StaticMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that are part of the custom endpoint group.

" + }, + "ExcludedMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

" + }, + "DBClusterEndpointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the endpoint.

" + } + }, + "documentation":"

This data type represents the information you need to connect to an Amazon Neptune DB cluster. This data type is used as a response element in the following actions:

  • CreateDBClusterEndpoint

  • DescribeDBClusterEndpoints

  • ModifyDBClusterEndpoint

  • DeleteDBClusterEndpoint

For the data structure that represents Amazon Neptune DB instance endpoints, see Endpoint.

" + }, + "DBClusterEndpointAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified custom endpoint cannot be created because it already exists.

", + "error":{ + "code":"DBClusterEndpointAlreadyExistsFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterEndpointList":{ + "type":"list", + "member":{ + "shape":"DBClusterEndpoint", + "locationName":"DBClusterEndpointList" + } + }, + "DBClusterEndpointMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterEndpoints request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "DBClusterEndpoints":{ + "shape":"DBClusterEndpointList", + "documentation":"

Contains the details of the endpoints associated with the cluster and matching any filter conditions.

" + } + } + }, + "DBClusterEndpointNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified custom endpoint doesn't exist.

", + "error":{ + "code":"DBClusterEndpointNotFoundFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBClusterEndpointQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The cluster already has the maximum number of custom endpoints.

", + "error":{ + "code":"DBClusterEndpointQuotaExceededFault", + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, "DBClusterList":{ "type":"list", "member":{ @@ -2174,6 +2435,10 @@ "Status":{ "shape":"String", "documentation":"

Describes the state of association between the IAM role and the DB cluster. The Status property returns one of the following values:

  • ACTIVE - the IAM role ARN is associated with the DB cluster and can be used to access other AWS services on your behalf.

  • PENDING - the IAM role ARN is being associated with the DB cluster.

  • INVALID - the IAM role ARN is associated with the DB cluster, but the DB cluster is unable to assume the IAM role in order to access other AWS services on your behalf.

" + }, + "FeatureName":{ + "shape":"String", + "documentation":"

The name of the feature associated with the AWS Identity and Access Management (IAM) role. For the list of supported feature names, see DBEngineVersion.

" } }, "documentation":"

Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.

" @@ -3064,6 +3329,62 @@ }, "exception":true }, + "DeleteDBClusterEndpointMessage":{ + "type":"structure", + "required":["DBClusterEndpointIdentifier"], + "members":{ + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier associated with the custom endpoint. This parameter is stored as a lowercase string.

" + } + } + }, + "DeleteDBClusterEndpointOutput":{ + "type":"structure", + "members":{ + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterEndpointResourceIdentifier":{ + "shape":"String", + "documentation":"

A unique system-generated identifier for an endpoint. It remains the same for the whole life of the endpoint.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The DNS address of the endpoint.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The current status of the endpoint. One of: creating, available, deleting, inactive, modifying. The inactive state applies to an endpoint that cannot be used for a certain kind of cluster, such as a writer endpoint for a read-only secondary cluster in a global database.

" + }, + "EndpointType":{ + "shape":"String", + "documentation":"

The type of the endpoint. One of: READER, WRITER, CUSTOM.

" + }, + "CustomEndpointType":{ + "shape":"String", + "documentation":"

The type associated with a custom endpoint. One of: READER, WRITER, ANY.

" + }, + "StaticMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that are part of the custom endpoint group.

" + }, + "ExcludedMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

" + }, + "DBClusterEndpointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the endpoint.

" + } + }, + "documentation":"

This data type represents the information you need to connect to an Amazon Neptune DB cluster. This data type is used as a response element in the following actions:

  • CreateDBClusterEndpoint

  • DescribeDBClusterEndpoints

  • ModifyDBClusterEndpoint

  • DeleteDBClusterEndpoint

For the data structure that represents Amazon Neptune DB instance endpoints, see Endpoint.

" + }, "DeleteDBClusterMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -3174,6 +3495,31 @@ "EventSubscription":{"shape":"EventSubscription"} } }, + "DescribeDBClusterEndpointsMessage":{ + "type":"structure", + "members":{ + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the endpoint to describe. This parameter is stored as a lowercase string.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A set of name-value pairs that define which endpoints to include in the output. The filters are specified as name-value pairs, in the format Name=endpoint_type,Values=endpoint_type1,endpoint_type2,....

  • Name can be one of: db-cluster-endpoint-type, db-cluster-endpoint-custom-type, db-cluster-endpoint-id, db-cluster-endpoint-status.

  • Values for the db-cluster-endpoint-type filter can be one or more of: reader, writer, custom.

  • Values for the db-cluster-endpoint-custom-type filter can be one or more of: reader, any.

  • Values for the db-cluster-endpoint-status filter can be one or more of: available, creating, deleting, inactive, modifying.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBClusterEndpoints request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
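
A hedged sketch of how these request members combine, using one of the documented filters together with the Marker/MaxRecords pagination described above. The cluster identifier is a placeholder, and the generated class and method names are assumed to follow the SDK's usual codegen conventions.

```java
import software.amazon.awssdk.services.neptune.NeptuneClient;
import software.amazon.awssdk.services.neptune.model.DescribeDbClusterEndpointsRequest;
import software.amazon.awssdk.services.neptune.model.DescribeDbClusterEndpointsResponse;
import software.amazon.awssdk.services.neptune.model.Filter;

public class DescribeEndpointsSketch {
    public static void main(String[] args) {
        try (NeptuneClient neptune = NeptuneClient.create()) {
            String marker = null;
            do {
                // List only custom endpoints for the (placeholder) cluster, one page at a time.
                DescribeDbClusterEndpointsResponse page = neptune.describeDBClusterEndpoints(
                        DescribeDbClusterEndpointsRequest.builder()
                                .dbClusterIdentifier("my-neptune-cluster")
                                .filters(Filter.builder()
                                        .name("db-cluster-endpoint-type")
                                        .values("custom")
                                        .build())
                                .maxRecords(20)
                                .marker(marker)
                                .build());
                page.dbClusterEndpoints().forEach(e ->
                        System.out.println(e.dbClusterEndpointIdentifier() + " -> " + e.endpoint()));
                // A non-null marker means more records remain beyond this page.
                marker = page.marker();
            } while (marker != null);
        }
    }
}
```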

" + } + } + }, "DescribeDBClusterParameterGroupsMessage":{ "type":"structure", "members":{ @@ -3711,7 +4057,7 @@ "documentation":"

Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.

" } }, - "documentation":"

Specifies a connection endpoint.

" + "documentation":"

Specifies a connection endpoint.

For the data structure that represents Amazon Neptune DB cluster endpoints, see DBClusterEndpoint.

" }, "EngineDefaults":{ "type":"structure", @@ -4000,6 +4346,18 @@ }, "Integer":{"type":"integer"}, "IntegerOptional":{"type":"integer"}, + "InvalidDBClusterEndpointStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The requested operation cannot be performed on the endpoint while the endpoint is in this state.

", + "error":{ + "code":"InvalidDBClusterEndpointStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidDBClusterSnapshotStateFault":{ "type":"structure", "members":{ @@ -4178,6 +4536,74 @@ "type":"list", "member":{"shape":"String"} }, + "ModifyDBClusterEndpointMessage":{ + "type":"structure", + "required":["DBClusterEndpointIdentifier"], + "members":{ + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the endpoint to modify. This parameter is stored as a lowercase string.

" + }, + "EndpointType":{ + "shape":"String", + "documentation":"

The type of the endpoint. One of: READER, WRITER, ANY.

" + }, + "StaticMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that are part of the custom endpoint group.

" + }, + "ExcludedMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

" + } + } + }, + "ModifyDBClusterEndpointOutput":{ + "type":"structure", + "members":{ + "DBClusterEndpointIdentifier":{ + "shape":"String", + "documentation":"

The identifier associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The DB cluster identifier of the DB cluster associated with the endpoint. This parameter is stored as a lowercase string.

" + }, + "DBClusterEndpointResourceIdentifier":{ + "shape":"String", + "documentation":"

A unique system-generated identifier for an endpoint. It remains the same for the whole life of the endpoint.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The DNS address of the endpoint.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The current status of the endpoint. One of: creating, available, deleting, inactive, modifying. The inactive state applies to an endpoint that cannot be used for a certain kind of cluster, such as a writer endpoint for a read-only secondary cluster in a global database.

" + }, + "EndpointType":{ + "shape":"String", + "documentation":"

The type of the endpoint. One of: READER, WRITER, CUSTOM.

" + }, + "CustomEndpointType":{ + "shape":"String", + "documentation":"

The type associated with a custom endpoint. One of: READER, WRITER, ANY.

" + }, + "StaticMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that are part of the custom endpoint group.

" + }, + "ExcludedMembers":{ + "shape":"StringList", + "documentation":"

List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty.

" + }, + "DBClusterEndpointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) for the endpoint.

" + } + }, + "documentation":"

This data type represents the information you need to connect to an Amazon Neptune DB cluster. This data type is used as a response element in the following actions:

  • CreateDBClusterEndpoint

  • DescribeDBClusterEndpoints

  • ModifyDBClusterEndpoint

  • DeleteDBClusterEndpoint

For the data structure that represents Amazon Neptune DB instance endpoints, see Endpoint.

" + }, "ModifyDBClusterMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -4236,7 +4662,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine. Currently, setting this parameter has no effect. To upgrade your database engine to the most recent release, use the ApplyPendingMaintenanceAction API.

For a list of valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" + "documentation":"

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.

For a list of valid engine versions, see Engine Releases for Amazon Neptune, or call DescribeDBEngineVersions.

" }, "DeletionProtection":{ "shape":"BooleanOptional", @@ -4969,6 +5395,10 @@ "RoleArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the IAM role to disassociate from the DB cluster, for example arn:aws:iam::123456789012:role/NeptuneAccessRole.

" + }, + "FeatureName":{ + "shape":"String", + "documentation":"

The name of the feature for the DB cluster that the IAM role is to be disassociated from. For the list of supported feature names, see DBEngineVersion.

" } } }, @@ -5377,6 +5807,10 @@ "exception":true }, "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, "Subnet":{ "type":"structure", "members":{ diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml new file mode 100644 index 000000000000..caa0d6c05873 --- /dev/null +++ b/services/networkfirewall/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + networkfirewall + AWS Java SDK :: Services :: Network Firewall + The AWS Java SDK for Network Firewall module holds the client classes that are used for + communicating with Network Firewall. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.networkfirewall + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..9c4b18bbd77b --- /dev/null +++ b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListFirewallPolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FirewallPolicies" + }, + "ListFirewalls": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Firewalls" + }, + "ListRuleGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RuleGroups" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Tags" + } + } +} diff --git a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..a3c890097acf --- /dev/null +++ b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2752 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-11-12", + "endpointPrefix":"network-firewall", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"Network Firewall", + "serviceFullName":"AWS Network Firewall", + "serviceId":"Network Firewall", + "signatureVersion":"v4", + "signingName":"network-firewall", + "targetPrefix":"NetworkFirewall_20201112", + "uid":"network-firewall-2020-11-12" + }, + "operations":{ + "AssociateFirewallPolicy":{ + "name":"AssociateFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateFirewallPolicyRequest"}, + "output":{"shape":"AssociateFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Associates a FirewallPolicy with a Firewall.

A firewall policy defines how to monitor and manage your VPC network traffic, using a collection of inspection rule groups and other settings. Each firewall requires one firewall policy association, and you can use the same firewall policy for multiple firewalls.

" + }, + "AssociateSubnets":{ + "name":"AssociateSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateSubnetsRequest"}, + "output":{"shape":"AssociateSubnetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Associates the specified subnets in the Amazon VPC to the firewall. You can specify one subnet for each of the Availability Zones that the VPC spans.

This request creates an AWS Network Firewall firewall endpoint in each of the subnets. To enable the firewall's protections, you must also modify the VPC's route tables for each subnet's Availability Zone, to redirect the traffic that's coming into and going out of the zone through the firewall endpoint.

" + }, + "CreateFirewall":{ + "name":"CreateFirewall", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFirewallRequest"}, + "output":{"shape":"CreateFirewallResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"InsufficientCapacityException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Creates an AWS Network Firewall Firewall and accompanying FirewallStatus for a VPC.

The firewall defines the configuration settings for an AWS Network Firewall firewall. The settings that you can define at creation include the firewall policy, the subnets in your VPC to use for the firewall endpoints, and any tags that are attached to the firewall AWS resource.

After you create a firewall, you can provide additional settings, like the logging configuration.

To update the settings for a firewall, you use the operations that apply to the settings themselves, for example UpdateLoggingConfiguration, AssociateSubnets, and UpdateFirewallDeleteProtection.

To manage a firewall's tags, use the standard AWS resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

To retrieve information about firewalls, use ListFirewalls and DescribeFirewall.
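
A minimal sketch of a CreateFirewall call with the generated client. The VPC, subnet, and policy identifiers are placeholders, and the SubnetMapping member names are assumed from the service's public API rather than taken from this excerpt.

```java
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallRequest;
import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallResponse;
import software.amazon.awssdk.services.networkfirewall.model.SubnetMapping;

public class CreateFirewallSketch {
    public static void main(String[] args) {
        try (NetworkFirewallClient nfw = NetworkFirewallClient.create()) {
            // All identifiers and ARNs below are placeholders.
            CreateFirewallResponse response = nfw.createFirewall(CreateFirewallRequest.builder()
                    .firewallName("example-firewall")
                    .firewallPolicyArn("arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/example-policy")
                    .vpcId("vpc-0123456789abcdef0")
                    // One subnet per Availability Zone; an endpoint is created in each.
                    .subnetMappings(
                            SubnetMapping.builder().subnetId("subnet-0123456789abcdef0").build(),
                            SubnetMapping.builder().subnetId("subnet-0fedcba987654321").build())
                    .description("Firewall for the example VPC")
                    .build());
            System.out.println("Firewall ARN: " + response.firewall().firewallArn());
        }
    }
}
```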

" + }, + "CreateFirewallPolicy":{ + "name":"CreateFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFirewallPolicyRequest"}, + "output":{"shape":"CreateFirewallPolicyResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InsufficientCapacityException"} + ], + "documentation":"

Creates the firewall policy for the firewall according to the specifications.

An AWS Network Firewall firewall policy defines the behavior of a firewall, in a collection of stateless and stateful rule groups and other settings. You can use one firewall policy for multiple firewalls.
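
As a rough illustration, the sketch below builds a minimal policy that forwards all stateless traffic to the stateful engine. The FirewallPolicy member names and the aws:forward_to_sfe action are assumptions based on the service's public API, and the rule group ARN is a placeholder.

```java
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallPolicyRequest;
import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallPolicyResponse;
import software.amazon.awssdk.services.networkfirewall.model.FirewallPolicy;
import software.amazon.awssdk.services.networkfirewall.model.StatefulRuleGroupReference;

public class CreateFirewallPolicySketch {
    public static void main(String[] args) {
        try (NetworkFirewallClient nfw = NetworkFirewallClient.create()) {
            // Minimal policy: send all stateless traffic (including fragments) to the
            // stateful engine, which evaluates one placeholder stateful rule group.
            FirewallPolicy policy = FirewallPolicy.builder()
                    .statelessDefaultActions("aws:forward_to_sfe")
                    .statelessFragmentDefaultActions("aws:forward_to_sfe")
                    .statefulRuleGroupReferences(StatefulRuleGroupReference.builder()
                            .resourceArn("arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/example-rules")
                            .build())
                    .build();

            CreateFirewallPolicyResponse response = nfw.createFirewallPolicy(
                    CreateFirewallPolicyRequest.builder()
                            .firewallPolicyName("example-policy")
                            .firewallPolicy(policy)
                            .description("Forward everything to the stateful rule group")
                            .build());
            System.out.println("Policy ARN: " + response.firewallPolicyResponse().firewallPolicyArn());
        }
    }
}
```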

" + }, + "CreateRuleGroup":{ + "name":"CreateRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRuleGroupRequest"}, + "output":{"shape":"CreateRuleGroupResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InsufficientCapacityException"} + ], + "documentation":"

Creates the specified stateless or stateful rule group, which includes the rules for network traffic inspection, a capacity setting, and tags.

You provide your rule group specification in your request using either RuleGroup or Rules.

" + }, + "DeleteFirewall":{ + "name":"DeleteFirewall", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFirewallRequest"}, + "output":{"shape":"DeleteFirewallResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Deletes the specified Firewall and its FirewallStatus. This operation requires the firewall's DeleteProtection flag to be FALSE. You can't revert this operation.

You can check whether a firewall is in use by reviewing the route tables for the Availability Zones where you have firewall subnet mappings. Retrieve the subnet mappings by calling DescribeFirewall. You define and update the route tables through Amazon VPC. As needed, update the route tables for the zones to remove the firewall endpoints. When the route tables no longer use the firewall endpoints, you can remove the firewall safely.

To delete a firewall, remove the delete protection if you need to (using UpdateFirewallDeleteProtection), then delete the firewall by calling DeleteFirewall.
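
A short sketch of that two-step teardown with the generated client; the firewall name is a placeholder, and it assumes the VPC route tables no longer reference the firewall's endpoints.

```java
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.DeleteFirewallRequest;
import software.amazon.awssdk.services.networkfirewall.model.UpdateFirewallDeleteProtectionRequest;

public class DeleteFirewallSketch {
    public static void main(String[] args) {
        try (NetworkFirewallClient nfw = NetworkFirewallClient.create()) {
            // 1. Turn off delete protection so the firewall can be removed.
            nfw.updateFirewallDeleteProtection(UpdateFirewallDeleteProtectionRequest.builder()
                    .firewallName("example-firewall")
                    .deleteProtection(false)
                    .build());

            // 2. Delete the firewall itself.
            nfw.deleteFirewall(DeleteFirewallRequest.builder()
                    .firewallName("example-firewall")
                    .build());
        }
    }
}
```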

" + }, + "DeleteFirewallPolicy":{ + "name":"DeleteFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFirewallPolicyRequest"}, + "output":{"shape":"DeleteFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Deletes the specified FirewallPolicy.

" + }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "output":{"shape":"DeleteResourcePolicyResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes a resource policy that you created in a PutResourcePolicy request.

" + }, + "DeleteRuleGroup":{ + "name":"DeleteRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleGroupRequest"}, + "output":{"shape":"DeleteRuleGroupResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Deletes the specified RuleGroup.

" + }, + "DescribeFirewall":{ + "name":"DescribeFirewall", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFirewallRequest"}, + "output":{"shape":"DescribeFirewallResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the data objects for the specified firewall.

" + }, + "DescribeFirewallPolicy":{ + "name":"DescribeFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFirewallPolicyRequest"}, + "output":{"shape":"DescribeFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns the data objects for the specified firewall policy.

" + }, + "DescribeLoggingConfiguration":{ + "name":"DescribeLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLoggingConfigurationRequest"}, + "output":{"shape":"DescribeLoggingConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the logging configuration for the specified firewall.

" + }, + "DescribeResourcePolicy":{ + "name":"DescribeResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourcePolicyRequest"}, + "output":{"shape":"DescribeResourcePolicyResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a resource policy that you created in a PutResourcePolicy request.

" + }, + "DescribeRuleGroup":{ + "name":"DescribeRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRuleGroupRequest"}, + "output":{"shape":"DescribeRuleGroupResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns the data objects for the specified rule group.

" + }, + "DisassociateSubnets":{ + "name":"DisassociateSubnets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateSubnetsRequest"}, + "output":{"shape":"DisassociateSubnetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Removes the specified subnet associations from the firewall. This removes the firewall endpoints from the subnets and removes any network filtering protections that the endpoints were providing.

" + }, + "ListFirewallPolicies":{ + "name":"ListFirewallPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFirewallPoliciesRequest"}, + "output":{"shape":"ListFirewallPoliciesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Retrieves the metadata for the firewall policies that you have defined. Depending on your setting for max results and the number of firewall policies, a single call might not return the full list.

" + }, + "ListFirewalls":{ + "name":"ListFirewalls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFirewallsRequest"}, + "output":{"shape":"ListFirewallsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves the metadata for the firewalls that you have defined. If you provide VPC identifiers in your request, this returns only the firewalls for those VPCs.

Depending on your setting for max results and the number of firewalls, a single call might not return the full list.
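
Because ListFirewalls is declared pageable in the accompanying paginators-1.json, the generated client should expose a paginator that follows NextToken for you. A sketch follows; the FirewallMetadata accessor names are assumptions based on the service's public API.

```java
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.ListFirewallsRequest;

public class ListFirewallsSketch {
    public static void main(String[] args) {
        try (NetworkFirewallClient nfw = NetworkFirewallClient.create()) {
            // The paginator transparently requests additional pages until the list is exhausted.
            nfw.listFirewallsPaginator(ListFirewallsRequest.builder().maxResults(25).build())
                    .stream()
                    .flatMap(page -> page.firewalls().stream())
                    .forEach(fw -> System.out.println(fw.firewallName() + " " + fw.firewallArn()));
        }
    }
}
```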

" + }, + "ListRuleGroups":{ + "name":"ListRuleGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRuleGroupsRequest"}, + "output":{"shape":"ListRuleGroupsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Retrieves the metadata for the rule groups that you have defined. Depending on your setting for max results and the number of rule groups, a single call might not return the full list.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the tags associated with the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS Network Firewall: firewalls, firewall policies, and rule groups.

" + }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidResourcePolicyException"} + ], + "documentation":"

Creates or updates an AWS Identity and Access Management policy for your rule group or firewall policy. Use this to share rule groups and firewall policies between accounts. This operation works in conjunction with the AWS Resource Access Manager (RAM) service to manage resource sharing for Network Firewall.

Use this operation to create or update a resource policy for your rule group or firewall policy. In the policy, you specify the accounts that you want to share the resource with and the operations that you want the accounts to be able to perform.

When you add an account in the resource policy, you then use AWS Resource Access Manager (RAM) operations in that account to access and accept the shared rule group or firewall policy.

For additional information about resource sharing using RAM, see AWS Resource Access Manager User Guide.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Adds the specified tags to the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS Network Firewall: firewalls, firewall policies, and rule groups.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Removes the tags with the specified keys from the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can manage tags for the AWS resources that you manage through AWS Network Firewall: firewalls, firewall policies, and rule groups.

" + }, + "UpdateFirewallDeleteProtection":{ + "name":"UpdateFirewallDeleteProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallDeleteProtectionRequest"}, + "output":{"shape":"UpdateFirewallDeleteProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

Modifies the flag, DeleteProtection, which indicates whether it is possible to delete the firewall. If the flag is set to TRUE, the firewall is protected against deletion. This setting helps protect against accidentally deleting a firewall that's in use.

" + }, + "UpdateFirewallDescription":{ + "name":"UpdateFirewallDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallDescriptionRequest"}, + "output":{"shape":"UpdateFirewallDescriptionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"

Modifies the description for the specified firewall. Use the description to help you identify the firewall when you're working with it.

" + }, + "UpdateFirewallPolicy":{ + "name":"UpdateFirewallPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallPolicyRequest"}, + "output":{"shape":"UpdateFirewallPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"

Updates the properties of the specified firewall policy.

" + }, + "UpdateFirewallPolicyChangeProtection":{ + "name":"UpdateFirewallPolicyChangeProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFirewallPolicyChangeProtectionRequest"}, + "output":{"shape":"UpdateFirewallPolicyChangeProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

Modifies the flag, FirewallPolicyChangeProtection, which indicates whether it is possible to change the firewall policy association for the firewall. If the flag is set to TRUE, the firewall is protected against a change to its firewall policy association. Use this setting to help prevent accidentally changing the firewall policy for a firewall that is in use.

" + }, + "UpdateLoggingConfiguration":{ + "name":"UpdateLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLoggingConfigurationRequest"}, + "output":{"shape":"UpdateLoggingConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"LogDestinationPermissionException"} + ], + "documentation":"

Sets the logging configuration for the specified firewall.

To change the logging configuration, retrieve the LoggingConfiguration by calling DescribeLoggingConfiguration, then change it and provide the modified object to this update call. You must change the logging configuration one LogDestinationConfig at a time inside the retrieved LoggingConfiguration object.

You can perform only one of the following actions in any call to UpdateLoggingConfiguration:

  • Create a new log destination object by adding a single LogDestinationConfig array element to LogDestinationConfigs.

  • Delete a log destination object by removing a single LogDestinationConfig array element from LogDestinationConfigs.

  • Change the LogDestination setting in a single LogDestinationConfig array element.

You can't change the LogDestinationType or LogType in a LogDestinationConfig. To change these settings, delete the existing LogDestinationConfig object and create a new one, using two separate calls to this update operation.
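
A hedged sketch of that retrieve-modify-update cycle, adding a single S3 destination in one call. The firewall name and the LogDestination map keys (bucketName, prefix) are assumptions, not taken from this excerpt.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.DescribeLoggingConfigurationRequest;
import software.amazon.awssdk.services.networkfirewall.model.LogDestinationConfig;
import software.amazon.awssdk.services.networkfirewall.model.LoggingConfiguration;
import software.amazon.awssdk.services.networkfirewall.model.UpdateLoggingConfigurationRequest;

public class UpdateLoggingSketch {
    public static void main(String[] args) {
        try (NetworkFirewallClient nfw = NetworkFirewallClient.create()) {
            // 1. Retrieve the current logging configuration (may be null if none is set).
            LoggingConfiguration current = nfw.describeLoggingConfiguration(
                    DescribeLoggingConfigurationRequest.builder()
                            .firewallName("example-firewall")
                            .build())
                    .loggingConfiguration();

            // 2. Add a single new LogDestinationConfig -- one change per update call.
            List<LogDestinationConfig> configs = new ArrayList<>();
            if (current != null) {
                configs.addAll(current.logDestinationConfigs());
            }
            configs.add(LogDestinationConfig.builder()
                    .logType("ALERT")
                    .logDestinationType("S3")
                    // Destination keys and values below are placeholders.
                    .logDestination(Map.of("bucketName", "example-firewall-logs", "prefix", "alerts"))
                    .build());

            // 3. Send the modified configuration back.
            nfw.updateLoggingConfiguration(UpdateLoggingConfigurationRequest.builder()
                    .firewallName("example-firewall")
                    .loggingConfiguration(LoggingConfiguration.builder()
                            .logDestinationConfigs(configs)
                            .build())
                    .build());
        }
    }
}
```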

" + }, + "UpdateRuleGroup":{ + "name":"UpdateRuleGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateRuleGroupRequest"}, + "output":{"shape":"UpdateRuleGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidTokenException"} + ], + "documentation":"

Updates the rule settings for the specified rule group. You use a rule group by reference in one or more firewall policies. When you modify a rule group, you modify all firewall policies that use the rule group.

To update a rule group, first call DescribeRuleGroup to retrieve the current RuleGroup object, update the object as needed, and then provide the updated object to this call.
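
A minimal sketch of that describe-then-update flow; the rule group name is a placeholder, and the request member names are assumed from the service's public API.

```java
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.DescribeRuleGroupRequest;
import software.amazon.awssdk.services.networkfirewall.model.DescribeRuleGroupResponse;
import software.amazon.awssdk.services.networkfirewall.model.RuleGroup;
import software.amazon.awssdk.services.networkfirewall.model.UpdateRuleGroupRequest;

public class UpdateRuleGroupSketch {
    public static void main(String[] args) {
        try (NetworkFirewallClient nfw = NetworkFirewallClient.create()) {
            // 1. Retrieve the current rule group and its update token.
            DescribeRuleGroupResponse described = nfw.describeRuleGroup(DescribeRuleGroupRequest.builder()
                    .ruleGroupName("example-rules")
                    .type("STATEFUL")
                    .build());

            // 2. Rebuild the RuleGroup object; here it is copied unchanged,
            //    a real update would change the rules inside it.
            RuleGroup updated = described.ruleGroup().toBuilder().build();

            // 3. Send the updated object back together with the token.
            nfw.updateRuleGroup(UpdateRuleGroupRequest.builder()
                    .updateToken(described.updateToken())
                    .ruleGroupName("example-rules")
                    .type("STATEFUL")
                    .ruleGroup(updated)
                    .build());
        }
    }
}
```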

" + }, + "UpdateSubnetChangeProtection":{ + "name":"UpdateSubnetChangeProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSubnetChangeProtectionRequest"}, + "output":{"shape":"UpdateSubnetChangeProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

Modifies the flag, SubnetChangeProtection, which indicates whether it is possible to change the subnet associations for the firewall. If the flag is set to TRUE, the firewall is protected against changes to its subnet associations. Use this setting to help prevent accidentally modifying the subnet associations for a firewall that is in use.

" + } + }, + "shapes":{ + "ActionDefinition":{ + "type":"structure", + "members":{ + "PublishMetricAction":{ + "shape":"PublishMetricAction", + "documentation":"

Stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. This setting defines a CloudWatch dimension value to be published.

You can pair this custom action with any of the standard stateless rule actions. For example, you could pair this in a rule action with the standard action that forwards the packet for stateful inspection. Then, when a packet matches the rule, Network Firewall publishes metrics for the packet and forwards it.

" + } + }, + "documentation":"

A custom action to use in stateless rule actions settings. This is used in CustomAction.

" + }, + "ActionName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9]+$" + }, + "Address":{ + "type":"structure", + "required":["AddressDefinition"], + "members":{ + "AddressDefinition":{ + "shape":"AddressDefinition", + "documentation":"

Specify an IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation. Network Firewall supports all address ranges for IPv4.

Examples:

  • To configure Network Firewall to inspect for the IP address 192.0.2.44, specify 192.0.2.44/32.

  • To configure Network Firewall to inspect for IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
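
For illustration, the sketch below plugs the two example CIDR blocks above into Address values inside a MatchAttributes source list; the MatchAttributes member names are assumed from the service's public API.

```java
import software.amazon.awssdk.services.networkfirewall.model.Address;
import software.amazon.awssdk.services.networkfirewall.model.MatchAttributes;

public class AddressSketch {
    public static void main(String[] args) {
        // Match traffic from a single host (/32) and from a /24 block,
        // mirroring the CIDR examples in the documentation above.
        MatchAttributes match = MatchAttributes.builder()
                .sources(
                        Address.builder().addressDefinition("192.0.2.44/32").build(),
                        Address.builder().addressDefinition("192.0.2.0/24").build())
                .build();
        System.out.println(match);
    }
}
```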

" + } + }, + "documentation":"

A single IP address specification. This is used in the MatchAttributes source and destination specifications.

" + }, + "AddressDefinition":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([a-fA-F\\d:\\.]+/\\d{1,3})$" + }, + "Addresses":{ + "type":"list", + "member":{"shape":"Address"} + }, + "AssociateFirewallPolicyRequest":{ + "type":"structure", + "required":["FirewallPolicyArn"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.
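
A sketch of that conditional-update pattern around AssociateFirewallPolicy, retrying with a fresh token when InvalidTokenException signals that the firewall changed underneath the caller. The names and ARNs are placeholders, and DescribeFirewall is assumed to return the firewall's current update token.

```java
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.AssociateFirewallPolicyRequest;
import software.amazon.awssdk.services.networkfirewall.model.DescribeFirewallRequest;
import software.amazon.awssdk.services.networkfirewall.model.InvalidTokenException;

public class OptimisticLockingSketch {
    public static void main(String[] args) {
        try (NetworkFirewallClient nfw = NetworkFirewallClient.create()) {
            while (true) {
                // Read the firewall's current state to obtain its update token.
                String token = nfw.describeFirewall(DescribeFirewallRequest.builder()
                        .firewallName("example-firewall")
                        .build())
                        .updateToken();
                try {
                    // Conditional change: succeeds only if the firewall is unchanged
                    // since the token was issued.
                    nfw.associateFirewallPolicy(AssociateFirewallPolicyRequest.builder()
                            .firewallName("example-firewall")
                            .firewallPolicyArn("arn:aws:network-firewall:us-east-1:123456789012:firewall-policy/example-policy")
                            .updateToken(token)
                            .build());
                    break;
                } catch (InvalidTokenException stale) {
                    // The firewall changed since the token was retrieved; fetch a fresh token and retry.
                }
            }
        }
    }
}
```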

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

" + } + } + }, + "AssociateFirewallPolicyResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

" + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + } + } + }, + "AssociateSubnetsRequest":{ + "type":"structure", + "required":["SubnetMappings"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

The IDs of the subnets that you want to associate with the firewall.

" + } + } + }, + "AssociateSubnetsResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

The IDs of the subnets that are associated with the firewall.

" + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + } + } + }, + "Attachment":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"AzSubnet", + "documentation":"

The unique identifier of the subnet that you've specified to be used for a firewall endpoint.

" + }, + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

The identifier of the firewall endpoint that Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint.

" + }, + "Status":{ + "shape":"AttachmentStatus", + "documentation":"

The current status of the firewall endpoint in the subnet. This value reflects both the instantiation of the endpoint in the VPC subnet and the sync states that are reported in the Config settings. When this value is READY, the endpoint is available and configured properly to handle network traffic. When the endpoint isn't available for traffic, this value will reflect its state, for example CREATING, DELETING, or FAILED.

" + } + }, + "documentation":"

The configuration and status for a single subnet that you've specified for use by the AWS Network Firewall firewall. This is part of the FirewallStatus.

" + }, + "AttachmentStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "SCALING", + "READY" + ] + }, + "AvailabilityZone":{"type":"string"}, + "AzSubnet":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^subnet-[0-9a-f]+$" + }, + "AzSubnets":{ + "type":"list", + "member":{"shape":"AzSubnet"} + }, + "Boolean":{"type":"boolean"}, + "CollectionMember_String":{"type":"string"}, + "ConfigurationSyncState":{ + "type":"string", + "enum":[ + "PENDING", + "IN_SYNC" + ] + }, + "CreateFirewallPolicyRequest":{ + "type":"structure", + "required":[ + "FirewallPolicyName", + "FirewallPolicy" + ], + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

" + }, + "FirewallPolicy":{ + "shape":"FirewallPolicy", + "documentation":"

The rule groups and policy actions to use in the firewall policy.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the firewall policy.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key:value pairs to associate with the resource.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

If set to FALSE, Network Firewall makes the requested changes to your resources.

" + } + } + }, + "CreateFirewallPolicyResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicyResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

The high-level properties of a firewall policy. This, along with the FirewallPolicy, define the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" + } + } + }, + "CreateFirewallRequest":{ + "type":"structure", + "required":[ + "FirewallName", + "FirewallPolicyArn", + "VpcId", + "SubnetMappings" + ], + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the FirewallPolicy that you want to use for the firewall.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The unique identifier of the VPC where Network Firewall should create the firewall.

You can't change this setting after you create the firewall.

" + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

The public subnets to use for your Network Firewall firewalls. Each subnet must belong to a different Availability Zone in the VPC. Network Firewall creates a firewall endpoint in each subnet.

" + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

A flag indicating whether it is possible to delete the firewall. A setting of TRUE indicates that the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. When you create a firewall, the operation initializes this flag to TRUE.

" + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the firewall.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key:value pairs to associate with the resource.

" + } + } + }, + "CreateFirewallResponse":{ + "type":"structure", + "members":{ + "Firewall":{ + "shape":"Firewall", + "documentation":"

The configuration settings for the firewall. These settings include the firewall policy and the subnets in your VPC to use for the firewall endpoints.

" + }, + "FirewallStatus":{ + "shape":"FirewallStatus", + "documentation":"

Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

" + } + } + }, + "CreateRuleGroupRequest":{ + "type":"structure", + "required":[ + "RuleGroupName", + "Type", + "Capacity" + ], + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the rule group. You can't change the name of a rule group after you create it.

" + }, + "RuleGroup":{ + "shape":"RuleGroup", + "documentation":"

An object that defines the rule group rules.

You must provide either this rule group setting or a Rules setting, but not both.

" + }, + "Rules":{ + "shape":"RulesString", + "documentation":"

A string containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

You must provide either this rules setting or a populated RuleGroup setting, but not both.

You can provide the contents of a rules file through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your rules. Network Firewall uses the rules to populate the rule group, but does not maintain a reference to the original file or use it in any way after performing the create or update. If you call DescribeRuleGroup to retrieve the rule group, Network Firewall returns rules settings inside a RuleGroup object.

" + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the rule group.

" + }, + "Capacity":{ + "shape":"RuleCapacity", + "documentation":"

The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation. When you update a rule group, you are limited to this capacity. When you reference a rule group from a firewall policy, Network Firewall reserves this capacity for the rule group.

You can retrieve the capacity that would be required for a rule group before you create the rule group by calling CreateRuleGroup with DryRun set to TRUE.

You can't change or exceed this capacity when you update the rule group, so leave room for your rule group to grow.

Capacity for a stateless rule group

For a stateless rule group, the capacity required is the sum of the capacity requirements of the individual rules that you expect to have in the rule group.

To calculate the capacity requirement of a single rule, multiply the capacity requirement values of each of the rule's match settings:

  • A match setting with no criteria specified has a value of 1.

  • A match setting with Any specified has a value of 1.

  • All other match settings have a value equal to the number of elements provided in the setting. For example, a protocol setting [\"UDP\"] and a source setting [\"10.0.0.0/24\"] each have a value of 1. A protocol setting [\"UDP\",\"TCP\"] has a value of 2. A source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"] has a value of 3.

A rule with no criteria specified in any of its match settings has a capacity requirement of 1. A rule with protocol setting [\"UDP\",\"TCP\"], source setting [\"10.0.0.0/24\",\"10.0.0.1/24\",\"10.0.0.2/24\"], and a single specification or no specification for each of the other match settings has a capacity requirement of 6.

Capacity for a stateful rule group

For a stateful rule group, the minimum capacity required is the number of individual rules that you expect to have in the rule group.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key:value pairs to associate with the resource.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

If set to FALSE, Network Firewall makes the requested changes to your resources.
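
To make the dry-run workflow concrete, here is a minimal sketch using the synchronous AWS SDK for Java v2 client that is generated from this model. It assumes the SDK's standard NetworkFirewallClient entry point and the camel-case builder setters derived from the member names above; the rule group name and the Suricata rule string are placeholders.

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.CreateRuleGroupRequest;
    import software.amazon.awssdk.services.networkfirewall.model.CreateRuleGroupResponse;
    import software.amazon.awssdk.services.networkfirewall.model.RuleGroupType;

    public class DryRunRuleGroupCheck {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // DryRun=TRUE validates the request, including the requested capacity,
                // without creating the rule group.
                CreateRuleGroupRequest request = CreateRuleGroupRequest.builder()
                        .ruleGroupName("example-stateful-group")        // placeholder name
                        .type(RuleGroupType.STATEFUL)
                        .capacity(100)                                  // estimated capacity
                        .rules("pass tcp any any -> any any (sid:1;)")  // placeholder Suricata rule
                        .dryRun(true)
                        .build();
                CreateRuleGroupResponse response = client.createRuleGroup(request);
                // For a dry run, the returned ARN is a placeholder, as noted in RuleGroupResponse.
                System.out.println("Validated: " + response.ruleGroupResponse().ruleGroupArn());
            }
        }
    }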

" + } + } + }, + "CreateRuleGroupResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "RuleGroupResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

The high-level properties of a rule group. This, along with the RuleGroup, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

" + } + } + }, + "CustomAction":{ + "type":"structure", + "required":[ + "ActionName", + "ActionDefinition" + ], + "members":{ + "ActionName":{ + "shape":"ActionName", + "documentation":"

The descriptive name of the custom action. You can't change the name of a custom action after you create it.

" + }, + "ActionDefinition":{ + "shape":"ActionDefinition", + "documentation":"

The custom action associated with the action name.

" + } + }, + "documentation":"

An optional, non-standard action to use for stateless packet handling. You can define this in addition to the standard action that you must specify.

You define and name the custom actions that you want to be able to use, and then you reference them by name in your actions settings.

You can use custom actions in the following places:

  • In a rule group's StatelessRulesAndCustomActions specification. The custom actions are available for use by name inside the StatelessRulesAndCustomActions where you define them. You can use them for your stateless rule actions to specify what to do with a packet that matches the rule's match attributes.

  • In a FirewallPolicy specification, in StatelessCustomActions. The custom actions are available for use inside the policy where you define them. You can use them for the policy's default stateless actions settings to specify what to do with packets that don't match any of the policy's stateless rules.

" + }, + "CustomActions":{ + "type":"list", + "member":{"shape":"CustomAction"} + }, + "DeleteFirewallPolicyRequest":{ + "type":"structure", + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

You must specify the ARN or the name, and you can specify both.

" + } + } + }, + "DeleteFirewallPolicyResponse":{ + "type":"structure", + "required":["FirewallPolicyResponse"], + "members":{ + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

The object containing the definition of the FirewallPolicyResponse that you asked to delete.

" + } + } + }, + "DeleteFirewallRequest":{ + "type":"structure", + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + } + } + }, + "DeleteFirewallResponse":{ + "type":"structure", + "members":{ + "Firewall":{"shape":"Firewall"}, + "FirewallStatus":{"shape":"FirewallStatus"} + } + }, + "DeleteResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule group or firewall policy whose resource policy you want to delete.

" + } + } + }, + "DeleteResourcePolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteRuleGroupRequest":{ + "type":"structure", + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the rule group. You can't change the name of a rule group after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule group.

You must specify the ARN or the name, and you can specify both.

" + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

This setting is required for requests that do not include the RuleGroupARN.

" + } + } + }, + "DeleteRuleGroupResponse":{ + "type":"structure", + "required":["RuleGroupResponse"], + "members":{ + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

The high-level properties of a rule group. This, along with the RuleGroup, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

" + } + } + }, + "DescribeFirewallPolicyRequest":{ + "type":"structure", + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

You must specify the ARN or the name, and you can specify both.

" + } + } + }, + "DescribeFirewallPolicyResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicyResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

The high-level properties of a firewall policy. This, along with the FirewallPolicy, defines the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" + }, + "FirewallPolicy":{ + "shape":"FirewallPolicy", + "documentation":"

The policy for the specified firewall policy.

" + } + } + }, + "DescribeFirewallRequest":{ + "type":"structure", + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + } + } + }, + "DescribeFirewallResponse":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "Firewall":{ + "shape":"Firewall", + "documentation":"

The configuration settings for the firewall. These settings include the firewall policy and the subnets in your VPC to use for the firewall endpoints.

" + }, + "FirewallStatus":{ + "shape":"FirewallStatus", + "documentation":"

Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

" + } + } + }, + "DescribeLoggingConfigurationRequest":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + } + } + }, + "DescribeLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "LoggingConfiguration":{"shape":"LoggingConfiguration"} + } + }, + "DescribeResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule group or firewall policy whose resource policy you want to retrieve.

" + } + } + }, + "DescribeResourcePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"PolicyString", + "documentation":"

The AWS Identity and Access Management policy for the resource.

" + } + } + }, + "DescribeRuleGroupRequest":{ + "type":"structure", + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the rule group. You can't change the name of a rule group after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule group.

You must specify the ARN or the name, and you can specify both.

" + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

This setting is required for requests that do not include the RuleGroupARN.

" + } + } + }, + "DescribeRuleGroupResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "RuleGroupResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "RuleGroup":{ + "shape":"RuleGroup", + "documentation":"

The object that defines the rules in a rule group. This, along with RuleGroupResponse, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

AWS Network Firewall uses a rule group to inspect and control network traffic. You define stateless rule groups to inspect individual packets and you define stateful rule groups to inspect packets in the context of their traffic flow.

To use a rule group, you include it by reference in a Network Firewall firewall policy, then you use the policy in a firewall. You can reference a rule group from more than one firewall policy, and you can use a firewall policy in more than one firewall.

" + }, + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

The high-level properties of a rule group. This, along with the RuleGroup, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

" + } + } + }, + "Description":{ + "type":"string", + "max":512, + "pattern":"^.*$" + }, + "Destination":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.*$" + }, + "Dimension":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{ + "shape":"DimensionValue", + "documentation":"

The value to use in the custom metric dimension.

" + } + }, + "documentation":"

The value to use in an Amazon CloudWatch custom metric dimension. This is used in the PublishMetrics CustomAction. A CloudWatch custom metric dimension is a name/value pair that's part of the identity of a metric.

AWS Network Firewall sets the dimension name to CustomAction and you provide the dimension value.

For more information about CloudWatch custom metric dimensions, see Publishing Custom Metrics in the Amazon CloudWatch User Guide.
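
As an illustration, a minimal sketch of building the dimension value and the PublishMetricAction that carries it, assuming the standard builders generated for the AWS SDK for Java v2; the dimension value is a placeholder, and attaching the action to an ActionDefinition is covered by shapes defined elsewhere in this model.

    import software.amazon.awssdk.services.networkfirewall.model.Dimension;
    import software.amazon.awssdk.services.networkfirewall.model.PublishMetricAction;

    public class PublishMetricDimensionExample {
        public static void main(String[] args) {
            // Network Firewall supplies the dimension name (CustomAction);
            // you supply only the dimension value.
            Dimension dimension = Dimension.builder()
                    .value("MyMetricsAction")   // placeholder dimension value
                    .build();

            // The Dimensions list accepts exactly one entry.
            PublishMetricAction metricAction = PublishMetricAction.builder()
                    .dimensions(dimension)
                    .build();
            System.out.println(metricAction);
        }
    }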

" + }, + "DimensionValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-_ ]+$" + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"}, + "max":1, + "min":1 + }, + "DisassociateSubnetsRequest":{ + "type":"structure", + "required":["SubnetIds"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "SubnetIds":{ + "shape":"AzSubnets", + "documentation":"

The unique identifiers for the subnets that you want to disassociate.
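
A minimal sketch of the conditional-change pattern that UpdateToken describes, using the AWS SDK for Java v2 client generated from this model (standard naming assumed; the firewall name and subnet ID are placeholders).

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.DescribeFirewallRequest;
    import software.amazon.awssdk.services.networkfirewall.model.DisassociateSubnetsRequest;
    import software.amazon.awssdk.services.networkfirewall.model.InvalidTokenException;

    public class ConditionalSubnetDisassociation {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                // Retrieve the firewall to obtain a current update token.
                String token = client.describeFirewall(DescribeFirewallRequest.builder()
                                .firewallName("example-firewall")        // placeholder
                                .build())
                        .updateToken();
                try {
                    // Conditional change: fails if the firewall changed since the
                    // token was retrieved.
                    client.disassociateSubnets(DisassociateSubnetsRequest.builder()
                            .firewallName("example-firewall")
                            .updateToken(token)
                            .subnetIds("subnet-0123456789abcdef0")       // placeholder subnet ID
                            .build());
                } catch (InvalidTokenException e) {
                    // Re-describe the firewall for a fresh token, reapply, and retry.
                    System.err.println("Firewall changed concurrently: " + e.getMessage());
                }
            }
        }
    }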

" + } + } + }, + "DisassociateSubnetsResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

The IDs of the subnets that are associated with the firewall.

" + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + } + } + }, + "EndpointId":{"type":"string"}, + "ErrorMessage":{"type":"string"}, + "Firewall":{ + "type":"structure", + "required":[ + "FirewallPolicyArn", + "VpcId", + "SubnetMappings", + "FirewallId" + ], + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

The relationship of firewall to firewall policy is many to one. Each firewall requires one firewall policy association, and you can use the same firewall policy for multiple firewalls.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The unique identifier of the VPC where the firewall is in use.

" + }, + "SubnetMappings":{ + "shape":"SubnetMappings", + "documentation":"

The public subnets that Network Firewall is using for the firewall. Each subnet must belong to a different Availability Zone.

" + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

A flag indicating whether it is possible to delete the firewall. A setting of TRUE indicates that the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. When you create a firewall, the operation initializes this flag to TRUE.

" + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the firewall.

" + }, + "FirewallId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for the firewall.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

" + } + }, + "documentation":"

The firewall defines the configuration settings for an AWS Network Firewall firewall. These settings include the firewall policy, the subnets in your VPC to use for the firewall endpoints, and any tags that are attached to the firewall AWS resource.

The status of the firewall, for example whether it's ready to filter network traffic, is provided in the corresponding FirewallStatus. You can retrieve both objects by calling DescribeFirewall.

" + }, + "FirewallMetadata":{ + "type":"structure", + "members":{ + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + } + }, + "documentation":"

High-level information about a firewall, returned by operations like create and describe. You can use the information provided in the metadata to retrieve and manage a firewall.

" + }, + "FirewallPolicies":{ + "type":"list", + "member":{"shape":"FirewallPolicyMetadata"} + }, + "FirewallPolicy":{ + "type":"structure", + "required":[ + "StatelessDefaultActions", + "StatelessFragmentDefaultActions" + ], + "members":{ + "StatelessRuleGroupReferences":{ + "shape":"StatelessRuleGroupReferences", + "documentation":"

References to the stateless rule groups that are used in the policy. These define the matching criteria in stateless rules.

" + }, + "StatelessDefaultActions":{ + "shape":"StatelessActions", + "documentation":"

The actions to take on a packet if it doesn't match any of the stateless rules in the policy. If you want non-matching packets to be forwarded for stateful inspection, specify aws:forward_to_sfe.

You must specify one of the standard actions: aws:pass, aws:drop, or aws:forward_to_sfe. In addition, you can specify custom actions that are compatible with your standard action choice.

For example, you could specify [\"aws:pass\"] or you could specify [\"aws:pass\", \"customActionName\"]. For information about compatibility, see the custom action descriptions under CustomAction.

" + }, + "StatelessFragmentDefaultActions":{ + "shape":"StatelessActions", + "documentation":"

The actions to take on a fragmented packet if it doesn't match any of the stateless rules in the policy. If you want non-matching fragmented packets to be forwarded for stateful inspection, specify aws:forward_to_sfe.

You must specify one of the standard actions: aws:pass, aws:drop, or aws:forward_to_sfe. In addition, you can specify custom actions that are compatible with your standard action choice.

For example, you could specify [\"aws:pass\"] or you could specify [\"aws:pass\", \"customActionName\"]. For information about compatibility, see the custom action descriptions under CustomAction.

" + }, + "StatelessCustomActions":{ + "shape":"CustomActions", + "documentation":"

The custom action definitions that are available for use in the firewall policy's StatelessDefaultActions setting. You name each custom action that you define, and then you can use it by name in your default actions specifications.

" + }, + "StatefulRuleGroupReferences":{ + "shape":"StatefulRuleGroupReferences", + "documentation":"

References to the stateful rule groups that are used in the policy. These define the inspection criteria in stateful rules.

" + } + }, + "documentation":"

The firewall policy defines the behavior of a firewall using a collection of stateless and stateful rule groups and other settings. You can use one firewall policy for multiple firewalls.

This, along with FirewallPolicyResponse, defines the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" + }, + "FirewallPolicyMetadata":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

" + }, + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

" + } + }, + "documentation":"

High-level information about a firewall policy, returned by operations like create and describe. You can use the information provided in the metadata to retrieve and manage a firewall policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" + }, + "FirewallPolicyResponse":{ + "type":"structure", + "required":[ + "FirewallPolicyName", + "FirewallPolicyArn", + "FirewallPolicyId" + ], + "members":{ + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

If this response is for a create request that had DryRun set to TRUE, then this ARN is a placeholder that isn't attached to a valid resource.

" + }, + "FirewallPolicyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for the firewall policy.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the firewall policy.

" + }, + "FirewallPolicyStatus":{ + "shape":"ResourceStatus", + "documentation":"

The current status of the firewall policy. You can retrieve this for a firewall policy by calling DescribeFirewallPolicy and providing the firewall policy's name or ARN.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key:value pairs to associate with the resource.

" + } + }, + "documentation":"

The high-level properties of a firewall policy. This, along with the FirewallPolicy, defines the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" + }, + "FirewallStatus":{ + "type":"structure", + "required":[ + "Status", + "ConfigurationSyncStateSummary" + ], + "members":{ + "Status":{ + "shape":"FirewallStatusValue", + "documentation":"

The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you've configured it. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

" + }, + "ConfigurationSyncStateSummary":{ + "shape":"ConfigurationSyncState", + "documentation":"

The configuration sync state for the firewall. This summarizes the sync states reported in the Config settings for all of the Availability Zones where you have configured the firewall.

When you create a firewall or update its configuration, for example by adding a rule group to its firewall policy, Network Firewall distributes the configuration changes to all zones where the firewall is in use. This summary indicates whether the configuration changes have been applied everywhere.

This status must be IN_SYNC for the firewall to be ready for use, but it doesn't indicate that the firewall is ready. The Status setting indicates firewall readiness.

" + }, + "SyncStates":{ + "shape":"SyncStates", + "documentation":"

The subnets that you've configured for use by the Network Firewall firewall. This contains one array element per Availability Zone where you've configured a subnet. These objects provide details of the information that is summarized in the ConfigurationSyncStateSummary and Status, broken down by zone and configuration object.

" + } + }, + "documentation":"

Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.
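
As a usage sketch, the readiness rule described here can be polled with the generated AWS SDK for Java v2 client (standard naming assumed; the firewall name and polling interval are placeholders).

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.DescribeFirewallRequest;
    import software.amazon.awssdk.services.networkfirewall.model.FirewallStatusValue;

    public class WaitForFirewallReady {
        public static void main(String[] args) throws InterruptedException {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                DescribeFirewallRequest request = DescribeFirewallRequest.builder()
                        .firewallName("example-firewall")   // placeholder
                        .build();
                // READY requires ConfigurationSyncStateSummary to be IN_SYNC and all
                // configured subnet attachments to be ready.
                FirewallStatusValue status = client.describeFirewall(request).firewallStatus().status();
                while (status != FirewallStatusValue.READY) {
                    Thread.sleep(30_000);   // poll every 30 seconds
                    status = client.describeFirewall(request).firewallStatus().status();
                }
                System.out.println("Firewall is READY");
            }
        }
    }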

" + }, + "FirewallStatusValue":{ + "type":"string", + "enum":[ + "PROVISIONING", + "DELETING", + "READY" + ] + }, + "Firewalls":{ + "type":"list", + "member":{"shape":"FirewallMetadata"} + }, + "Flags":{ + "type":"list", + "member":{"shape":"TCPFlag"} + }, + "GeneratedRulesType":{ + "type":"string", + "enum":[ + "ALLOWLIST", + "DENYLIST" + ] + }, + "HashMapKey":{ + "type":"string", + "max":50, + "min":3, + "pattern":"^[0-9A-Za-z.\\-_@\\/]+$" + }, + "HashMapValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\s\\S]*$" + }, + "Header":{ + "type":"structure", + "required":[ + "Protocol", + "Source", + "SourcePort", + "Direction", + "Destination", + "DestinationPort" + ], + "members":{ + "Protocol":{ + "shape":"StatefulRuleProtocol", + "documentation":"

The protocol to inspect for. To match with any protocol, specify ANY.

" + }, + "Source":{ + "shape":"Source", + "documentation":"

The source IP address or address range to inspect for, in CIDR notation. To match with any address, specify ANY.

Specify an IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation. Network Firewall supports all address ranges for IPv4.

Examples:

  • To configure Network Firewall to inspect for the IP address 192.0.2.44, specify 192.0.2.44/32.

  • To configure Network Firewall to inspect for IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + }, + "SourcePort":{ + "shape":"Port", + "documentation":"

The source port to inspect for. You can specify an individual port, for example 1994, and you can specify a port range, for example 1990-1994. To match with any port, specify ANY.

" + }, + "Direction":{ + "shape":"StatefulRuleDirection", + "documentation":"

The direction of traffic flow to inspect. If set to ANY, the inspection matches bidirectional traffic, both from the source to the destination and from the destination to the source. If set to FORWARD, the inspection only matches traffic going from the source to the destination.

" + }, + "Destination":{ + "shape":"Destination", + "documentation":"

The destination IP address or address range to inspect for, in CIDR notation. To match with any address, specify ANY.

Specify an IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation. Network Firewall supports all address ranges for IPv4.

Examples:

  • To configure Network Firewall to inspect for the IP address 192.0.2.44, specify 192.0.2.44/32.

  • To configure Network Firewall to inspect for IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + }, + "DestinationPort":{ + "shape":"Port", + "documentation":"

The destination port to inspect for. You can specify an individual port, for example 1994, and you can specify a port range, for example 1990-1994. To match with any port, specify ANY.

" + } + }, + "documentation":"

The 5-tuple criteria for AWS Network Firewall to use to inspect packet headers in stateful traffic flow inspection. Traffic flows that match the criteria are a match for the corresponding StatefulRule.
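
A minimal sketch of this 5-tuple built with the generated AWS SDK for Java v2 builders. It assumes TCP and FORWARD are accepted StatefulRuleProtocol and StatefulRuleDirection values; the addresses and ports are placeholders.

    import software.amazon.awssdk.services.networkfirewall.model.Header;

    public class StatefulHeaderExample {
        public static void main(String[] args) {
            // Match TCP traffic from 10.0.0.0/24 (any source port) to any
            // destination on port 443, forward direction only.
            Header header = Header.builder()
                    .protocol("TCP")
                    .source("10.0.0.0/24")
                    .sourcePort("ANY")
                    .direction("FORWARD")
                    .destination("ANY")
                    .destinationPort("443")
                    .build();
            System.out.println(header);
        }
    }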

" + }, + "IPSet":{ + "type":"structure", + "required":["Definition"], + "members":{ + "Definition":{ + "shape":"VariableDefinitionList", + "documentation":"

The list of IP addresses and address ranges, in CIDR notation.

" + } + }, + "documentation":"

A list of IP addresses and address ranges, in CIDR notation. This is part of a RuleVariables.

" + }, + "IPSets":{ + "type":"map", + "key":{"shape":"RuleVariableName"}, + "value":{"shape":"IPSet"} + }, + "InsufficientCapacityException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

AWS doesn't currently have enough available capacity to fulfill your request. Try your request later.

", + "exception":true, + "fault":true + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Your request is valid, but Network Firewall couldn't perform the operation because of a system problem. Retry your request.

", + "exception":true, + "fault":true + }, + "InvalidOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation failed because it's not valid. For example, you might have tried to delete a rule group or firewall policy that's in use.

", + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation failed because of a problem with your request. Examples include:

  • You specified an unsupported parameter name or value.

  • You tried to update a property with a value that isn't among the available types.

  • Your request references an ARN that is malformed, or corresponds to a resource that isn't valid in the context of the request.

", + "exception":true + }, + "InvalidResourcePolicyException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

", + "exception":true + }, + "InvalidTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The token you provided is stale or isn't valid for the operation.

", + "exception":true + }, + "Keyword":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*" + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Unable to perform the operation because doing so would violate a limit setting.

", + "exception":true + }, + "ListFirewallPoliciesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

" + } + } + }, + "ListFirewallPoliciesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "FirewallPolicies":{ + "shape":"FirewallPolicies", + "documentation":"

The metadata for the firewall policies. Depending on your setting for max results and the number of firewall policies that you have, this might not be the full list.
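
A minimal sketch of the NextToken/MaxResults pagination loop described above, using the generated AWS SDK for Java v2 client (standard naming assumed; the page size is arbitrary).

    import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
    import software.amazon.awssdk.services.networkfirewall.model.ListFirewallPoliciesRequest;
    import software.amazon.awssdk.services.networkfirewall.model.ListFirewallPoliciesResponse;

    public class ListAllFirewallPolicies {
        public static void main(String[] args) {
            try (NetworkFirewallClient client = NetworkFirewallClient.create()) {
                String nextToken = null;
                do {
                    ListFirewallPoliciesResponse page = client.listFirewallPolicies(
                            ListFirewallPoliciesRequest.builder()
                                    .maxResults(50)
                                    .nextToken(nextToken)   // null on the first request
                                    .build());
                    page.firewallPolicies().forEach(p ->
                            System.out.println(p.name() + " -> " + p.arn()));
                    nextToken = page.nextToken();
                } while (nextToken != null);
            }
        }
    }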

" + } + } + }, + "ListFirewallsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "VpcIds":{ + "shape":"VpcIds", + "documentation":"

The unique identifiers of the VPCs that you want Network Firewall to retrieve the firewalls for. Leave this blank to retrieve all firewalls that you have defined.

" + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

" + } + } + }, + "ListFirewallsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "Firewalls":{ + "shape":"Firewalls", + "documentation":"

The firewall metadata objects for the VPCs that you specified. Depending on your setting for max results and the number of firewalls you have, a single call might not return the full list.

" + } + } + }, + "ListRuleGroupsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

" + } + } + }, + "ListRuleGroupsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "RuleGroups":{ + "shape":"RuleGroups", + "documentation":"

The rule group metadata objects that you've defined. Depending on your setting for max results and the number of rule groups, this might not be the full list.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "MaxResults":{ + "shape":"TagsPaginationMaxResults", + "documentation":"

The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

" + }, + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags that are associated with the resource.

" + } + } + }, + "LogDestinationConfig":{ + "type":"structure", + "required":[ + "LogType", + "LogDestinationType", + "LogDestination" + ], + "members":{ + "LogType":{ + "shape":"LogType", + "documentation":"

The type of log to send. Alert logs report traffic that matches a StatefulRule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs.

" + }, + "LogDestinationType":{ + "shape":"LogDestinationType", + "documentation":"

The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

" + }, + "LogDestination":{ + "shape":"LogDestinationMap", + "documentation":"

The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.

  • For an Amazon S3 bucket, provide the name of the bucket, with key bucketName, and optionally provide a prefix, with key prefix. The following example specifies an Amazon S3 bucket named DOC-EXAMPLE-BUCKET and the prefix alerts:

    \"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }

  • For a CloudWatch log group, provide the name of the CloudWatch log group, with key logGroup. The following example specifies a log group named alert-log-group:

    \"LogDestination\": { \"logGroup\": \"alert-log-group\" }

  • For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key deliveryStream. The following example specifies a delivery stream named alert-delivery-stream:

    \"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }

" + } + }, + "documentation":"

Defines where AWS Network Firewall sends logs for the firewall for one log type. This is used in LoggingConfiguration. You can send each type of log to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.

Network Firewall generates logs for stateful rule groups. You can save alert and flow log types. The stateful rules engine records flow logs for all network traffic that it receives. It records alert logs for traffic that matches stateful rules that have the rule action set to DROP or ALERT.

" + }, + "LogDestinationConfigs":{ + "type":"list", + "member":{"shape":"LogDestinationConfig"} + }, + "LogDestinationMap":{ + "type":"map", + "key":{"shape":"HashMapKey"}, + "value":{"shape":"HashMapValue"} + }, + "LogDestinationPermissionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Unable to send logs to a configured logging destination.

", + "exception":true + }, + "LogDestinationType":{ + "type":"string", + "enum":[ + "S3", + "CloudWatchLogs", + "KinesisDataFirehose" + ], + "max":30, + "min":2, + "pattern":"[0-9A-Za-z]+" + }, + "LogType":{ + "type":"string", + "enum":[ + "ALERT", + "FLOW" + ] + }, + "LoggingConfiguration":{ + "type":"structure", + "required":["LogDestinationConfigs"], + "members":{ + "LogDestinationConfigs":{ + "shape":"LogDestinationConfigs", + "documentation":"

Defines the logging destinations for the logs for a firewall. Network Firewall generates logs for stateful rule groups.

" + } + }, + "documentation":"

Defines how AWS Network Firewall performs logging for a Firewall.

" + }, + "MatchAttributes":{ + "type":"structure", + "members":{ + "Sources":{ + "shape":"Addresses", + "documentation":"

The source IP addresses and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address.

" + }, + "Destinations":{ + "shape":"Addresses", + "documentation":"

The destination IP addresses and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address.

" + }, + "SourcePorts":{ + "shape":"PortRanges", + "documentation":"

The source ports to inspect for. If not specified, this matches with any source port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

You can specify individual ports, for example 1994, and you can specify port ranges, for example 1990-1994.

" + }, + "DestinationPorts":{ + "shape":"PortRanges", + "documentation":"

The destination ports to inspect for. If not specified, this matches with any destination port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

You can specify individual ports, for example 1994, and you can specify port ranges, for example 1990-1994.

" + }, + "Protocols":{ + "shape":"ProtocolNumbers", + "documentation":"

The protocols to inspect for, specified using each protocol's assigned internet protocol number (IANA). If not specified, this matches with any protocol.

" + }, + "TCPFlags":{ + "shape":"TCPFlags", + "documentation":"

The TCP flags and masks to inspect for. If not specified, this matches with any settings. This setting is only used for protocol 6 (TCP).

" + } + }, + "documentation":"

Criteria for Network Firewall to use to inspect an individual packet in stateless rule inspection. Each match attributes set can include one or more items such as IP address, CIDR range, port number, protocol, and TCP flags.

" + }, + "PaginationMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[0-9A-Za-z:\\/+=]+$" + }, + "PerObjectStatus":{ + "type":"structure", + "members":{ + "SyncStatus":{ + "shape":"PerObjectSyncStatus", + "documentation":"

" + } + }, + "documentation":"

" + }, + "PerObjectSyncStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_SYNC" + ] + }, + "PolicyString":{ + "type":"string", + "max":395000, + "min":1, + "pattern":".*\\S.*" + }, + "Port":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.*$" + }, + "PortRange":{ + "type":"structure", + "required":[ + "FromPort", + "ToPort" + ], + "members":{ + "FromPort":{ + "shape":"PortRangeBound", + "documentation":"

The lower limit of the port range. This must be less than or equal to the ToPort specification.

" + }, + "ToPort":{ + "shape":"PortRangeBound", + "documentation":"

The upper limit of the port range. This must be greater than or equal to the FromPort specification.

" + } + }, + "documentation":"

A single port range specification. This is used for source and destination port ranges in the stateless rule MatchAttributes, SourcePorts, and DestinationPorts settings.

" + }, + "PortRangeBound":{ + "type":"integer", + "max":65535, + "min":0 + }, + "PortRanges":{ + "type":"list", + "member":{"shape":"PortRange"} + }, + "PortSet":{ + "type":"structure", + "members":{ + "Definition":{ + "shape":"VariableDefinitionList", + "documentation":"

The set of port ranges.

" + } + }, + "documentation":"

A set of port ranges for use in the rules in a rule group.

" + }, + "PortSets":{ + "type":"map", + "key":{"shape":"RuleVariableName"}, + "value":{"shape":"PortSet"} + }, + "Priority":{ + "type":"integer", + "max":65535, + "min":1 + }, + "ProtocolNumber":{ + "type":"integer", + "max":255, + "min":0 + }, + "ProtocolNumbers":{ + "type":"list", + "member":{"shape":"ProtocolNumber"} + }, + "PublishMetricAction":{ + "type":"structure", + "required":["Dimensions"], + "members":{ + "Dimensions":{ + "shape":"Dimensions", + "documentation":"

" + } + }, + "documentation":"

Stateless inspection criteria that publishes the specified metrics to Amazon CloudWatch for the matching packet. This setting defines a CloudWatch dimension value to be published.

" + }, + "PutResourcePolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Policy" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the account that you want to share rule groups and firewall policies with.

" + }, + "Policy":{ + "shape":"PolicyString", + "documentation":"

The AWS Identity and Access Management policy statement that lists the accounts that you want to share your rule group or firewall policy with and the operations that you want the accounts to be able to perform.

For a rule group resource, you can specify the following operations in the Actions section of the statement:

  • network-firewall:CreateFirewallPolicy

  • network-firewall:UpdateFirewallPolicy

  • network-firewall:ListRuleGroups

For a firewall policy resource, you can specify the following operations in the Actions section of the statement:

  • network-firewall:CreateFirewall

  • network-firewall:UpdateFirewall

  • network-firewall:AssociateFirewallPolicy

  • network-firewall:ListFirewallPolicies

In the Resource section of the statement, you specify the ARNs for the rule groups and firewall policies that you want to share with the account that you specified in Arn.

" + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "ResourceArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^arn:aws.*" + }, + "ResourceId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^([0-9a-f]{8})-([0-9a-f]{4}-){3}([0-9a-f]{12})$" + }, + "ResourceName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Unable to locate a resource using the parameters that you provided.

", + "exception":true + }, + "ResourceOwnerCheckException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

", + "exception":true + }, + "ResourceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "RuleCapacity":{"type":"integer"}, + "RuleDefinition":{ + "type":"structure", + "required":[ + "MatchAttributes", + "Actions" + ], + "members":{ + "MatchAttributes":{ + "shape":"MatchAttributes", + "documentation":"

Criteria for Network Firewall to use to inspect an individual packet in stateless rule inspection. Each match attributes set can include one or more items such as IP address, CIDR range, port number, protocol, and TCP flags.

" + }, + "Actions":{ + "shape":"StatelessActions", + "documentation":"

The actions to take on a packet that matches one of the stateless rule definition's match attributes. You must specify a standard action and you can add custom actions.

Network Firewall only forwards a packet for stateful rule inspection if you specify aws:forward_to_sfe for a rule that the packet matches, or if the packet doesn't match any stateless rule and you specify aws:forward_to_sfe for the StatelessDefaultActions setting for the FirewallPolicy.

For every rule, you must specify exactly one of the following standard actions.

  • aws:pass - Discontinues all inspection of the packet and permits it to go to its intended destination.

  • aws:drop - Discontinues all inspection of the packet and blocks it from going to its intended destination.

  • aws:forward_to_sfe - Discontinues stateless inspection of the packet and forwards it to the stateful rule engine for inspection.

Additionally, you can specify a custom action. To do this, you define a custom action by name and type, then provide the name you've assigned to the action in this Actions setting. For information about the options, see CustomAction.

To provide more than one action in this setting, separate the settings with a comma. For example, if you have a custom PublishMetrics action that you've named MyMetricsAction, then you could specify the standard action aws:pass and the custom action with [\"aws:pass\", \"MyMetricsAction\"].

" + } + }, + "documentation":"

The inspection criteria and action for a single stateless rule. AWS Network Firewall inspects each packet for the specified matching criteria. When a packet matches the criteria, Network Firewall performs the rule's actions on the packet.
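
A minimal sketch of a single stateless rule definition using the generated AWS SDK for Java v2 builders (standard naming assumed; the protocol number and port range are placeholders).

    import software.amazon.awssdk.services.networkfirewall.model.MatchAttributes;
    import software.amazon.awssdk.services.networkfirewall.model.PortRange;
    import software.amazon.awssdk.services.networkfirewall.model.RuleDefinition;

    public class StatelessRuleDefinitionExample {
        public static void main(String[] args) {
            // Match TCP (protocol 6) packets with a destination port of 1990-1994
            // and forward them to the stateful rule engine.
            MatchAttributes match = MatchAttributes.builder()
                    .protocols(6)
                    .destinationPorts(PortRange.builder()
                            .fromPort(1990)
                            .toPort(1994)
                            .build())
                    .build();

            RuleDefinition rule = RuleDefinition.builder()
                    .matchAttributes(match)
                    .actions("aws:forward_to_sfe")
                    .build();
            System.out.println(rule);
        }
    }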

" + }, + "RuleGroup":{ + "type":"structure", + "required":["RulesSource"], + "members":{ + "RuleVariables":{ + "shape":"RuleVariables", + "documentation":"

Settings that are available for use in the rules in the rule group. You can only use these for stateful rule groups.

" + }, + "RulesSource":{ + "shape":"RulesSource", + "documentation":"

The stateful rules or stateless rules for the rule group.

" + } + }, + "documentation":"

The object that defines the rules in a rule group. This, along with RuleGroupResponse, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

AWS Network Firewall uses a rule group to inspect and control network traffic. You define stateless rule groups to inspect individual packets and you define stateful rule groups to inspect packets in the context of their traffic flow.

To use a rule group, you include it by reference in a Network Firewall firewall policy, then you use the policy in a firewall. You can reference a rule group from more than one firewall policy, and you can use a firewall policy in more than one firewall.

" + }, + "RuleGroupMetadata":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the rule group. You can't change the name of a rule group after you create it.

" + }, + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule group.

" + } + }, + "documentation":"

High-level information about a rule group, returned by ListRuleGroups. You can use the information provided in the metadata to retrieve and manage a rule group.

" + }, + "RuleGroupResponse":{ + "type":"structure", + "required":[ + "RuleGroupArn", + "RuleGroupName", + "RuleGroupId" + ], + "members":{ + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule group.

If this response is for a create request that had DryRun set to TRUE, then this ARN is a placeholder that isn't attached to a valid resource.

" + }, + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the rule group. You can't change the name of a rule group after you create it.

" + }, + "RuleGroupId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for the rule group.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the rule group.

" + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

" + }, + "Capacity":{ + "shape":"RuleCapacity", + "documentation":"

The maximum operating resources that this rule group can use. Rule group capacity is fixed at creation. When you update a rule group, you are limited to this capacity. When you reference a rule group from a firewall policy, Network Firewall reserves this capacity for the rule group.

You can retrieve the capacity that would be required for a rule group before you create the rule group by calling CreateRuleGroup with DryRun set to TRUE.

" + }, + "RuleGroupStatus":{ + "shape":"ResourceStatus", + "documentation":"

Detailed information about the current status of a rule group.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key:value pairs to associate with the resource.

" + } + }, + "documentation":"

The high-level properties of a rule group. This, along with the RuleGroup, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

" + }, + "RuleGroupType":{ + "type":"string", + "enum":[ + "STATELESS", + "STATEFUL" + ] + }, + "RuleGroups":{ + "type":"list", + "member":{"shape":"RuleGroupMetadata"} + }, + "RuleOption":{ + "type":"structure", + "required":["Keyword"], + "members":{ + "Keyword":{ + "shape":"Keyword", + "documentation":"

" + }, + "Settings":{ + "shape":"Settings", + "documentation":"

" + } + }, + "documentation":"

Additional settings for a stateful rule. This is part of the StatefulRule configuration.

" + }, + "RuleOptions":{ + "type":"list", + "member":{"shape":"RuleOption"} + }, + "RuleTargets":{ + "type":"list", + "member":{"shape":"CollectionMember_String"} + }, + "RuleVariableName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[A-Za-z][A-Za-z0-9_]*$" + }, + "RuleVariables":{ + "type":"structure", + "members":{ + "IPSets":{ + "shape":"IPSets", + "documentation":"

A list of IP addresses and address ranges, in CIDR notation.

" + }, + "PortSets":{ + "shape":"PortSets", + "documentation":"

A list of port ranges.

" + } + }, + "documentation":"

Settings that are available for use in the rules in the RuleGroup where this is defined.

" + }, + "RulesSource":{ + "type":"structure", + "members":{ + "RulesString":{ + "shape":"RulesString", + "documentation":"

Stateful inspection criteria, provided in Suricata compatible intrusion prevention system (IPS) rules. Suricata is an open-source network IPS that includes a standard rule-based language for network traffic inspection.

These rules contain the inspection criteria and the action to take for traffic that matches the criteria, so this type of rule group doesn't have a separate action setting.

You can provide the rules from a file that you've stored in an Amazon S3 bucket, or by providing the rules in a Suricata rules string. To import from Amazon S3, provide the fully qualified name of the file that contains the rules definitions. To provide a Suricata rule string, provide the complete, Suricata compatible rule.

" + }, + "RulesSourceList":{ + "shape":"RulesSourceList", + "documentation":"

Stateful inspection criteria for a domain list rule group.

" + }, + "StatefulRules":{ + "shape":"StatefulRules", + "documentation":"

The 5-tuple stateful inspection criteria. This contains an array of individual 5-tuple stateful rules to be used together in a stateful rule group.

" + }, + "StatelessRulesAndCustomActions":{ + "shape":"StatelessRulesAndCustomActions", + "documentation":"

Stateless inspection criteria to be used in a stateless rule group.

" + } + }, + "documentation":"

The stateless or stateful rules definitions for use in a single rule group. Each rule group requires a single RulesSource. You can use an instance of this for either stateless rules or stateful rules.

" + }, + "RulesSourceList":{ + "type":"structure", + "required":[ + "Targets", + "TargetTypes", + "GeneratedRulesType" + ], + "members":{ + "Targets":{ + "shape":"RuleTargets", + "documentation":"

The domains that you want to inspect for in your traffic flows. To provide multiple domains, separate them with commas.

" + }, + "TargetTypes":{ + "shape":"TargetTypes", + "documentation":"

" + }, + "GeneratedRulesType":{ + "shape":"GeneratedRulesType", + "documentation":"

Whether you want to allow or deny access to the domains in your target list.

" + } + }, + "documentation":"

Stateful inspection criteria for a domain list rule group.
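
As a rough, non-authoritative sketch assuming the Java classes generated from this model, a denylist domain rule group source might be assembled as follows; the domain names are placeholders.

import software.amazon.awssdk.services.networkfirewall.model.GeneratedRulesType;
import software.amazon.awssdk.services.networkfirewall.model.RulesSource;
import software.amazon.awssdk.services.networkfirewall.model.RulesSourceList;
import software.amazon.awssdk.services.networkfirewall.model.TargetType;

public class DomainListSketch {
    public static RulesSource denyExampleDomains() {
        // Deny outbound access to example.org and any subdomain of example.com,
        // matching on both the TLS SNI and the HTTP Host header.
        return RulesSource.builder()
                .rulesSourceList(RulesSourceList.builder()
                        .targets("example.org", ".example.com")
                        .targetTypes(TargetType.TLS_SNI, TargetType.HTTP_HOST)
                        .generatedRulesType(GeneratedRulesType.DENYLIST)
                        .build())
                .build();
    }
}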

" + }, + "RulesString":{ + "type":"string", + "max":1000000, + "min":0 + }, + "Setting":{ + "type":"string", + "max":8192, + "min":1, + "pattern":".*" + }, + "Settings":{ + "type":"list", + "member":{"shape":"Setting"} + }, + "Source":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^.*$" + }, + "StatefulAction":{ + "type":"string", + "enum":[ + "PASS", + "DROP", + "ALERT" + ] + }, + "StatefulRule":{ + "type":"structure", + "required":[ + "Action", + "Header", + "RuleOptions" + ], + "members":{ + "Action":{ + "shape":"StatefulAction", + "documentation":"

Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria. For all actions, Network Firewall performs the specified action and discontinues stateful inspection of the traffic flow.

The actions for a stateful rule are defined as follows:

  • PASS - Permits the packets to go to the intended destination.

  • DROP - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

  • ALERT - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the Firewall LoggingConfiguration.

    You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with ALERT action, verify in the logs that the rule is filtering as you want, then change the action to DROP.

" + }, + "Header":{ + "shape":"Header", + "documentation":"

The stateful 5-tuple inspection criteria for this rule, used to inspect traffic flows.

" + }, + "RuleOptions":{ + "shape":"RuleOptions", + "documentation":"

" + } + }, + "documentation":"

A single 5-tuple stateful rule, for use in a stateful rule group.
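
The following is a minimal sketch, assuming the Java classes generated from this model, of a stateful rule that alerts on outbound Telnet so the match can be verified in the logs before the action is switched to DROP, as described above; the addresses, port, and sid value are placeholders.

import software.amazon.awssdk.services.networkfirewall.model.Header;
import software.amazon.awssdk.services.networkfirewall.model.RuleOption;
import software.amazon.awssdk.services.networkfirewall.model.StatefulAction;
import software.amazon.awssdk.services.networkfirewall.model.StatefulRule;
import software.amazon.awssdk.services.networkfirewall.model.StatefulRuleDirection;
import software.amazon.awssdk.services.networkfirewall.model.StatefulRuleProtocol;

public class StatefulRuleSketch {
    public static StatefulRule alertOnOutboundTelnet() {
        // Start with ALERT to verify the match in the logs, then change the
        // action to DROP once the rule filters as intended.
        return StatefulRule.builder()
                .action(StatefulAction.ALERT)
                .header(Header.builder()
                        .protocol(StatefulRuleProtocol.TCP)
                        .source("10.0.0.0/16").sourcePort("ANY")
                        .direction(StatefulRuleDirection.FORWARD)
                        .destination("ANY").destinationPort("23")
                        .build())
                .ruleOptions(RuleOption.builder().keyword("sid").settings("1000001").build())
                .build();
    }
}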

" + }, + "StatefulRuleDirection":{ + "type":"string", + "enum":[ + "FORWARD", + "ANY" + ] + }, + "StatefulRuleGroupReference":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the stateful rule group.

" + } + }, + "documentation":"

Identifier for a single stateful rule group, used in a firewall policy to refer to a rule group.

" + }, + "StatefulRuleGroupReferences":{ + "type":"list", + "member":{"shape":"StatefulRuleGroupReference"} + }, + "StatefulRuleProtocol":{ + "type":"string", + "enum":[ + "IP", + "TCP", + "UDP", + "ICMP", + "HTTP", + "FTP", + "TLS", + "SMB", + "DNS", + "DCERPC", + "SSH", + "SMTP", + "IMAP", + "MSN", + "KRB5", + "IKEV2", + "TFTP", + "NTP", + "DHCP" + ] + }, + "StatefulRules":{ + "type":"list", + "member":{"shape":"StatefulRule"} + }, + "StatelessActions":{ + "type":"list", + "member":{"shape":"CollectionMember_String"} + }, + "StatelessRule":{ + "type":"structure", + "required":[ + "RuleDefinition", + "Priority" + ], + "members":{ + "RuleDefinition":{ + "shape":"RuleDefinition", + "documentation":"

Defines the stateless 5-tuple packet inspection criteria and the action to take on a packet that matches the criteria.

" + }, + "Priority":{ + "shape":"Priority", + "documentation":"

A setting that indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. Network Firewall evaluates the rules in a rule group starting with the lowest priority setting. You must ensure that the priority settings are unique for the rule group.

Each stateless rule group uses exactly one StatelessRulesAndCustomActions object, and each StatelessRulesAndCustomActions contains exactly one StatelessRules object. To ensure unique priority settings for your rule groups, set unique priorities for the stateless rules that you define inside any single StatelessRules object.

You can change the priority settings of your rules at any time. To make it easier to insert rules later, number them so there's a wide range in between, for example use 100, 200, and so on.
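
As a brief illustration, assuming the generated Java builders, spacing priorities by 100 leaves room to insert rules later without renumbering the group; the two RuleDefinition arguments are placeholders for rules defined elsewhere.

import software.amazon.awssdk.services.networkfirewall.model.RuleDefinition;
import software.amazon.awssdk.services.networkfirewall.model.StatelessRule;
import software.amazon.awssdk.services.networkfirewall.model.StatelessRulesAndCustomActions;

public class PrioritySketch {
    public static StatelessRulesAndCustomActions orderedRules(RuleDefinition allowDns,
                                                              RuleDefinition dropAll) {
        // Network Firewall evaluates the rule with priority 100 before the
        // rule with priority 200; a new rule can later slot in at 150.
        return StatelessRulesAndCustomActions.builder()
                .statelessRules(
                        StatelessRule.builder().priority(100).ruleDefinition(allowDns).build(),
                        StatelessRule.builder().priority(200).ruleDefinition(dropAll).build())
                .build();
    }
}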

" + } + }, + "documentation":"

A single stateless rule. This is used in StatelessRulesAndCustomActions.

" + }, + "StatelessRuleGroupReference":{ + "type":"structure", + "required":[ + "ResourceArn", + "Priority" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the stateless rule group.

" + }, + "Priority":{ + "shape":"Priority", + "documentation":"

An integer setting that indicates the order in which to run the stateless rule groups in a single FirewallPolicy. Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. You must ensure that the priority settings are unique within each policy.

" + } + }, + "documentation":"

Identifier for a single stateless rule group, used in a firewall policy to refer to the rule group.

" + }, + "StatelessRuleGroupReferences":{ + "type":"list", + "member":{"shape":"StatelessRuleGroupReference"} + }, + "StatelessRules":{ + "type":"list", + "member":{"shape":"StatelessRule"} + }, + "StatelessRulesAndCustomActions":{ + "type":"structure", + "required":["StatelessRules"], + "members":{ + "StatelessRules":{ + "shape":"StatelessRules", + "documentation":"

Defines the set of stateless rules for use in a stateless rule group.

" + }, + "CustomActions":{ + "shape":"CustomActions", + "documentation":"

Defines an array of individual custom action definitions that are available for use by the stateless rules in this StatelessRulesAndCustomActions specification. You name each custom action that you define, and then you can use it by name in your StatelessRule RuleDefinition Actions specification.

" + } + }, + "documentation":"

Stateless inspection criteria. Each stateless rule group uses exactly one of these data types to define its stateless rules.

" + }, + "SubnetMapping":{ + "type":"structure", + "required":["SubnetId"], + "members":{ + "SubnetId":{ + "shape":"CollectionMember_String", + "documentation":"

The unique identifier for the subnet.

" + } + }, + "documentation":"

The ID for a subnet that you want to associate with the firewall. This is used with CreateFirewall and AssociateSubnets. AWS Network Firewall creates an instance of the associated firewall in each subnet that you specify, to filter traffic in the subnet's Availability Zone.

" + }, + "SubnetMappings":{ + "type":"list", + "member":{"shape":"SubnetMapping"} + }, + "SyncState":{ + "type":"structure", + "members":{ + "Attachment":{ + "shape":"Attachment", + "documentation":"

The attachment status of the firewall's association with a single VPC subnet. For each configured subnet, Network Firewall creates the attachment by instantiating the firewall endpoint in the subnet so that it's ready to take traffic. This is part of the FirewallStatus.

" + }, + "Config":{ + "shape":"SyncStateConfig", + "documentation":"

The configuration status of the firewall endpoint in a single VPC subnet. Network Firewall provides each endpoint with the rules that are configured in the firewall policy. Each time you add a subnet or modify the associated firewall policy, Network Firewall synchronizes the rules in the endpoint, so it can properly filter network traffic. This is part of the FirewallStatus.

" + } + }, + "documentation":"

The status of the firewall endpoint and firewall policy configuration for a single VPC subnet.

For each VPC subnet that you associate with a firewall, AWS Network Firewall does the following:

  • Instantiates a firewall endpoint in the subnet, ready to take traffic.

  • Configures the endpoint with the current firewall policy settings, to provide the filtering behavior for the endpoint.

When you update a firewall, for example to add a subnet association or change a rule group in the firewall policy, the affected sync states reflect out-of-sync or not ready status until the changes are complete.

" + }, + "SyncStateConfig":{ + "type":"map", + "key":{"shape":"ResourceName"}, + "value":{"shape":"PerObjectStatus"} + }, + "SyncStates":{ + "type":"map", + "key":{"shape":"AvailabilityZone"}, + "value":{"shape":"SyncState"} + }, + "TCPFlag":{ + "type":"string", + "enum":[ + "FIN", + "SYN", + "RST", + "PSH", + "ACK", + "URG", + "ECE", + "CWR" + ] + }, + "TCPFlagField":{ + "type":"structure", + "required":["Flags"], + "members":{ + "Flags":{ + "shape":"Flags", + "documentation":"

Used in conjunction with the Masks setting to define the flags that must be set and flags that must not be set in order for the packet to match. This setting can only specify values that are also specified in the Masks setting.

For the flags that are specified in the masks setting, the following must be true for the packet to match:

  • The ones that are set in this flags setting must be set in the packet.

  • The ones that are not set in this flags setting must also not be set in the packet.

" + }, + "Masks":{ + "shape":"Flags", + "documentation":"

The set of flags to consider in the inspection. To inspect all flags in the valid values list, leave this with no setting.

" + } + }, + "documentation":"

TCP flags and masks to inspect packets for, used in stateless rules MatchAttributes settings.
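
As a worked example, to match packets that have SYN set and ACK clear (typically the first packet of a TCP handshake), put SYN in Flags and both SYN and ACK in Masks. A minimal sketch with the assumed generated Java classes:

import software.amazon.awssdk.services.networkfirewall.model.TCPFlag;
import software.amazon.awssdk.services.networkfirewall.model.TCPFlagField;

public class TcpFlagSketch {
    public static TCPFlagField synOnly() {
        // Only SYN and ACK are inspected (Masks). Of those, SYN must be set,
        // and ACK, inspected but absent from Flags, must be clear.
        return TCPFlagField.builder()
                .flags(TCPFlag.SYN)
                .masks(TCPFlag.SYN, TCPFlag.ACK)
                .build();
    }
}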

" + }, + "TCPFlags":{ + "type":"list", + "member":{"shape":"TCPFlagField"} + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as \"customer.\" Tag keys are case-sensitive.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

" + } + }, + "documentation":"

A key:value pair associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each AWS resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^.*$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^.*$" + }, + "TagsPaginationMaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "TargetType":{ + "type":"string", + "enum":[ + "TLS_SNI", + "HTTP_HOST" + ] + }, + "TargetTypes":{ + "type":"list", + "member":{"shape":"TargetType"} + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Unable to process the request due to throttling limitations.

", + "exception":true + }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation you requested isn't supported by Network Firewall.

", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateFirewallDeleteProtectionRequest":{ + "type":"structure", + "required":["DeleteProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.
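
A minimal sketch of this conditional-update pattern, assuming the generated Java client and model classes: describe the firewall to get a token, send the update with that token, and re-read and retry if Network Firewall reports that the token is stale. The same pattern applies to the other firewall update operations described below.

import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.DescribeFirewallResponse;
import software.amazon.awssdk.services.networkfirewall.model.InvalidTokenException;
import software.amazon.awssdk.services.networkfirewall.model.UpdateFirewallDeleteProtectionRequest;

public class DeleteProtectionSketch {
    public static void enableDeleteProtection(NetworkFirewallClient nf, String firewallName) {
        while (true) {
            // Read the current state of the firewall to obtain its update token.
            DescribeFirewallResponse current = nf.describeFirewall(r -> r.firewallName(firewallName));
            try {
                nf.updateFirewallDeleteProtection(UpdateFirewallDeleteProtectionRequest.builder()
                        .firewallName(firewallName)
                        .updateToken(current.updateToken())
                        .deleteProtection(true)
                        .build());
                return;
            } catch (InvalidTokenException stale) {
                // The firewall changed since it was read; re-describe and retry.
            }
        }
    }
}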

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

A flag indicating whether it is possible to delete the firewall. A setting of TRUE indicates that the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. When you create a firewall, the operation initializes this flag to TRUE.

" + } + } + }, + "UpdateFirewallDeleteProtectionResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "DeleteProtection":{ + "shape":"Boolean", + "documentation":"

" + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + } + } + }, + "UpdateFirewallDescriptionRequest":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The new description for the firewall. If you omit this setting, Network Firewall removes the description for the firewall.

" + } + } + }, + "UpdateFirewallDescriptionResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the firewall.

" + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + } + } + }, + "UpdateFirewallPolicyChangeProtectionRequest":{ + "type":"structure", + "required":["FirewallPolicyChangeProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + } + } + }, + "UpdateFirewallPolicyChangeProtectionResponse":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "FirewallPolicyChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + } + } + }, + "UpdateFirewallPolicyRequest":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicy" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall policy.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallPolicyName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall policy. You can't change the name of a firewall policy after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallPolicy":{ + "shape":"FirewallPolicy", + "documentation":"

The updated firewall policy to use for the firewall.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the firewall policy.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

If set to FALSE, Network Firewall makes the requested changes to your resources.
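
A short sketch of this two-pass pattern, assuming the generated Java classes: validate the request with DryRun set to TRUE, then repeat the same request with DryRun set to FALSE to apply it. The policy name, token, and updated policy are supplied by the caller.

import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.FirewallPolicy;
import software.amazon.awssdk.services.networkfirewall.model.UpdateFirewallPolicyRequest;

public class DryRunSketch {
    public static void validateThenApply(NetworkFirewallClient nf, String policyName,
                                         String updateToken, FirewallPolicy updatedPolicy) {
        UpdateFirewallPolicyRequest.Builder req = UpdateFirewallPolicyRequest.builder()
                .firewallPolicyName(policyName)
                .updateToken(updateToken)
                .firewallPolicy(updatedPolicy);
        // First pass: DryRun=TRUE only checks permissions and request validity.
        nf.updateFirewallPolicy(req.dryRun(true).build());
        // Second pass: DryRun=FALSE actually applies the change.
        nf.updateFirewallPolicy(req.dryRun(false).build());
    }
}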

" + } + } + }, + "UpdateFirewallPolicyResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "FirewallPolicyResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the firewall policy. The token marks the state of the policy resource at the time of the request.

To make changes to the policy, you provide the token in your request. Network Firewall uses the token to ensure that the policy hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall policy again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallPolicyResponse":{ + "shape":"FirewallPolicyResponse", + "documentation":"

The high-level properties of a firewall policy. This, along with the FirewallPolicy, defines the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" + } + } + }, + "UpdateLoggingConfigurationRequest":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "LoggingConfiguration":{ + "shape":"LoggingConfiguration", + "documentation":"

Defines how Network Firewall performs logging for a firewall. If you omit this setting, Network Firewall disables logging for the firewall.

" + } + } + }, + "UpdateLoggingConfigurationResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "LoggingConfiguration":{"shape":"LoggingConfiguration"} + } + }, + "UpdateRuleGroupRequest":{ + "type":"structure", + "required":["UpdateToken"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the rule group.

You must specify the ARN or the name, and you can specify both.

" + }, + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the rule group. You can't change the name of a rule group after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "RuleGroup":{ + "shape":"RuleGroup", + "documentation":"

An object that defines the rule group rules.

You must provide either this rule group setting or a Rules setting, but not both.

" + }, + "Rules":{ + "shape":"RulesString", + "documentation":"

The name of a file containing stateful rule group rules specifications in Suricata flat format, with one rule per line. Use this to import your existing Suricata compatible rule groups.

You must provide either this rules setting or a populated RuleGroup setting, but not both.

You can provide your rule group specification in a file through this setting when you create or update your rule group. The call response returns a RuleGroup object that Network Firewall has populated from your file. Network Firewall uses the file contents to populate the rule group rules, but does not maintain a reference to the file or use the file in any way after performing the create or update. If you call DescribeRuleGroup to retrieve the rule group, Network Firewall returns rules settings inside a RuleGroup object.
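
A minimal sketch, assuming the generated Java classes, that reads a local Suricata rules file and sends its contents through this Rules setting; the file path, rule group name, and token are supplied by the caller.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.RuleGroupType;
import software.amazon.awssdk.services.networkfirewall.model.UpdateRuleGroupRequest;

public class SuricataImportSketch {
    public static void replaceRules(NetworkFirewallClient nf, String ruleGroupName,
                                    String updateToken, String rulesFilePath) throws IOException {
        // Read the Suricata rules (one rule per line) and send them as the
        // Rules string; Network Firewall keeps no reference to the file itself.
        String rules = new String(Files.readAllBytes(Paths.get(rulesFilePath)), StandardCharsets.UTF_8);
        nf.updateRuleGroup(UpdateRuleGroupRequest.builder()
                .ruleGroupName(ruleGroupName)
                .type(RuleGroupType.STATEFUL)
                .updateToken(updateToken)
                .rules(rules)
                .build());
    }
}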

" + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

Indicates whether the rule group is stateless or stateful. If the rule group is stateless, it contains stateless rules. If it is stateful, it contains stateful rules.

This setting is required for requests that do not include the RuleGroupARN.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the rule group.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Indicates whether you want Network Firewall to just check the validity of the request, rather than run the request.

If set to TRUE, Network Firewall checks whether the request can run successfully, but doesn't actually make the requested changes. The call returns the value that the request would return if you ran it with dry run set to FALSE, but doesn't make additions or changes to your resources. This option allows you to make sure that you have the required permissions to run the request and that your request parameters are valid.

If set to FALSE, Network Firewall makes the requested changes to your resources.

" + } + } + }, + "UpdateRuleGroupResponse":{ + "type":"structure", + "required":[ + "UpdateToken", + "RuleGroupResponse" + ], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

A token used for optimistic locking. Network Firewall returns a token to your requests that access the rule group. The token marks the state of the rule group resource at the time of the request.

To make changes to the rule group, you provide the token in your request. Network Firewall uses the token to ensure that the rule group hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the rule group again to get a current copy of it with a current token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "RuleGroupResponse":{ + "shape":"RuleGroupResponse", + "documentation":"

The high-level properties of a rule group. This, along with the RuleGroup, defines the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

" + } + } + }, + "UpdateSubnetChangeProtectionRequest":{ + "type":"structure", + "required":["SubnetChangeProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

You must specify the ARN or the name, and you can specify both.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

You must specify the ARN or the name, and you can specify both.

" + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + } + } + }, + "UpdateSubnetChangeProtectionResponse":{ + "type":"structure", + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

" + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the firewall.

" + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

The descriptive name of the firewall. You can't change the name of a firewall after you create it.

" + }, + "SubnetChangeProtection":{ + "shape":"Boolean", + "documentation":"

A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

" + } + } + }, + "UpdateToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([0-9a-f]{8})-([0-9a-f]{4}-){3}([0-9a-f]{12})$" + }, + "VariableDefinition":{ + "type":"string", + "min":1, + "pattern":"^.*$" + }, + "VariableDefinitionList":{ + "type":"list", + "member":{"shape":"VariableDefinition"} + }, + "VpcId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^vpc-[0-9a-f]+$" + }, + "VpcIds":{ + "type":"list", + "member":{"shape":"VpcId"} + } + }, + "documentation":"

This is the API Reference for AWS Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

  • The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and handling errors. For general information about using the AWS REST APIs, see AWS APIs.

    To access Network Firewall using the REST API endpoint: https://network-firewall.<region>.amazonaws.com

  • Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

  • For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

Network Firewall is a stateful, managed network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or AWS Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source intrusion detection system (IDS) engine. For information about Suricata, see the Suricata website.

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

  • Allow domains or IP addresses for known AWS service endpoints, such as Amazon S3, and block all other forms of traffic.

  • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

  • Perform deep packet inspection on traffic entering or leaving your VPC.

  • Rate limit traffic going from AWS to on-premises IP destinations.

  • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

To start using Network Firewall, do the following:

  1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

  2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

  3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

  4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

  5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

  6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.
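
As a rough end-to-end sketch of steps 3 through 5, assuming the Java client and model classes generated from this model; every name, ID, and the sample Suricata rule are placeholders.

import software.amazon.awssdk.services.networkfirewall.NetworkFirewallClient;
import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallPolicyRequest;
import software.amazon.awssdk.services.networkfirewall.model.CreateFirewallRequest;
import software.amazon.awssdk.services.networkfirewall.model.CreateRuleGroupRequest;
import software.amazon.awssdk.services.networkfirewall.model.FirewallPolicy;
import software.amazon.awssdk.services.networkfirewall.model.RuleGroupType;
import software.amazon.awssdk.services.networkfirewall.model.StatefulRuleGroupReference;
import software.amazon.awssdk.services.networkfirewall.model.SubnetMapping;

public class GettingStartedSketch {
    public static void main(String[] args) {
        NetworkFirewallClient nf = NetworkFirewallClient.create();

        // Step 3: a stateful rule group built from a Suricata rules string.
        String ruleGroupArn = nf.createRuleGroup(CreateRuleGroupRequest.builder()
                .ruleGroupName("drop-telnet")
                .type(RuleGroupType.STATEFUL)
                .capacity(10)
                .rules("drop tcp any any -> any 23 (msg:\"no telnet\"; sid:1000001; rev:1;)")
                .build()).ruleGroupResponse().ruleGroupArn();

        // Step 4: a firewall policy that references the rule group and sets
        // the default handling for stateless traffic.
        String policyArn = nf.createFirewallPolicy(CreateFirewallPolicyRequest.builder()
                .firewallPolicyName("example-policy")
                .firewallPolicy(FirewallPolicy.builder()
                        .statefulRuleGroupReferences(
                                StatefulRuleGroupReference.builder().resourceArn(ruleGroupArn).build())
                        .statelessDefaultActions("aws:forward_to_sfe")
                        .statelessFragmentDefaultActions("aws:forward_to_sfe")
                        .build())
                .build()).firewallPolicyResponse().firewallPolicyArn();

        // Step 5: the firewall itself, with one endpoint per listed subnet.
        nf.createFirewall(CreateFirewallRequest.builder()
                .firewallName("example-firewall")
                .firewallPolicyArn(policyArn)
                .vpcId("vpc-0123456789abcdef0")
                .subnetMappings(SubnetMapping.builder().subnetId("subnet-0123456789abcdef0").build())
                .build());
    }
}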

" +} diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index 3d6b51b56d4d..545defb9cf42 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index e7d035946510..82064a376317 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 1f5e71a0470a..fa5dbec3c79a 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 44fcc140c2d5..7cfdff65217c 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/organizations/src/main/resources/codegen-resources/service-2.json b/services/organizations/src/main/resources/codegen-resources/service-2.json index 21828d04a9bd..de1098be3006 100644 --- a/services/organizations/src/main/resources/codegen-resources/service-2.json +++ b/services/organizations/src/main/resources/codegen-resources/service-2.json @@ -34,7 +34,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

This operation can be called only by the following principals when they also have the relevant IAM permissions:

  • Invitation to join or Approve all features request handshakes: only a principal from the member account.

    The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole permission so that AWS Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

  • Enable all features final confirmation handshake: only a principal from the master account.

    For more information about invitations, see Inviting an AWS Account to Join Your Organization in the AWS Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.

" + "documentation":"

Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

This operation can be called only by the following principals when they also have the relevant IAM permissions:

  • Invitation to join or Approve all features request handshakes: only a principal from the member account.

    The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole permission so that AWS Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

  • Enable all features final confirmation handshake: only a principal from the management account.

    For more information about invitations, see Inviting an AWS Account to Join Your Organization in the AWS Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.

" }, "AttachPolicy":{ "name":"AttachPolicy", @@ -58,7 +58,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the AWS Organizations User Guide for information about each policy type:

This operation can be called only from the organization's master account.

" + "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the AWS Organizations User Guide for information about each policy type:

This operation can be called only from the organization's management account.

" }, "CancelHandshake":{ "name":"CancelHandshake", @@ -99,7 +99,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the master account administrator permissions in the new member account. Principals in the master account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's master account.

This operation can be called only from the organization's master account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" + "documentation":"

Creates an AWS account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that AWS performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

  • Use the Id member of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

  • Check the AWS CloudTrail log for the CreateAccountResult event. For information on using AWS CloudTrail with AWS Organizations, see Monitoring the Activity in Your Organization in the AWS Organizations User Guide.
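
A minimal polling sketch of the first option, assuming the generated Java Organizations client; the email address and account name are placeholders.

import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.CreateAccountState;
import software.amazon.awssdk.services.organizations.model.CreateAccountStatus;

public class CreateAccountSketch {
    public static CreateAccountStatus createAndWait(OrganizationsClient org) throws InterruptedException {
        String requestId = org.createAccount(r -> r
                .email("new-account@example.com")
                .accountName("workload-account"))
                .createAccountStatus().id();

        // Poll DescribeCreateAccountStatus until the asynchronous request finishes.
        while (true) {
            CreateAccountStatus status = org.describeCreateAccountStatus(
                    r -> r.createAccountRequestId(requestId)).createAccountStatus();
            if (status.state() != CreateAccountState.IN_PROGRESS) {
                return status;
            }
            Thread.sleep(5_000);
        }
    }
}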

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, AWS Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

AWS Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. AWS Organizations clones the company name and address information for the new account from the organization's management account.

This operation can be called only from the organization's management account.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management Console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" }, "CreateGovCloudAccount":{ "name":"CreateGovCloudAccount", @@ -120,7 +120,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

This action is available if all of the following are true:

  • You're authorized to create accounts in the AWS GovCloud (US) Region. For more information on the AWS GovCloud (US) Region, see the AWS GovCloud User Guide.

  • You already have an account in the AWS GovCloud (US) Region that is associated with your master account in the commercial Region.

  • You call this action from the master account of your organization in the commercial Region.

  • You have the organizations:CreateGovCloudAccount permission.

AWS Organizations automatically creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS automatically enables AWS CloudTrail for AWS GovCloud (US) accounts, but you should also do the following:

  • Verify that AWS CloudTrail is enabled to store logs.

  • Create an S3 bucket for AWS CloudTrail log storage.

    For more information, see Verifying AWS CloudTrail Is Enabled in the AWS GovCloud User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission. The tags are attached to the commercial account associated with the GovCloud account, rather than the GovCloud account itself. To add tags to the GovCloud account, call the TagResource operation in the GovCloud Region after the new GovCloud account exists.

You call this action from the master account of your organization in the commercial Region to create a standalone AWS account in the AWS GovCloud (US) Region. After the account is created, the master account of an organization in the AWS GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the AWS GovCloud (US) to join an organization, see AWS Organizations in the AWS GovCloud User Guide.

Calling CreateGovCloudAccount is an asynchronous request that AWS performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the AWS GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

A role is created in the new account in the commercial Region that allows the master account in the organization in the commercial Region to assume it. An AWS GovCloud (US) account is then created and associated with the commercial account that you just created. A role is also created in the new AWS GovCloud (US) account that can be assumed by the AWS GovCloud (US) account that is associated with the master account of the commercial organization. For more information and to view a diagram that explains how account access works, see AWS Organizations in the AWS GovCloud User Guide.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account is not automatically collected. This includes a payment method and signing the end user license agreement (EULA). If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the AWS Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" + "documentation":"

This action is available if all of the following are true:

  • You're authorized to create accounts in the AWS GovCloud (US) Region. For more information on the AWS GovCloud (US) Region, see the AWS GovCloud User Guide.

  • You already have an account in the AWS GovCloud (US) Region that is paired with a management account of an organization in the commercial Region.

  • You call this action from the management account of your organization in the commercial Region.

  • You have the organizations:CreateGovCloudAccount permission.

AWS Organizations automatically creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS automatically enables AWS CloudTrail for AWS GovCloud (US) accounts, but you should also do the following:

  • Verify that AWS CloudTrail is enabled to store logs.

  • Create an S3 bucket for AWS CloudTrail log storage.

    For more information, see Verifying AWS CloudTrail Is Enabled in the AWS GovCloud User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission. The tags are attached to the commercial account associated with the GovCloud account, rather than the GovCloud account itself. To add tags to the GovCloud account, call the TagResource operation in the GovCloud Region after the new GovCloud account exists.

You call this action from the management account of your organization in the commercial Region to create a standalone AWS account in the AWS GovCloud (US) Region. After the account is created, the management account of an organization in the AWS GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the AWS GovCloud (US) to join an organization, see AWS Organizations in the AWS GovCloud User Guide.

Calling CreateGovCloudAccount is an asynchronous request that AWS performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the AWS GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

A role is created in the new account in the commercial Region that allows the management account in the organization in the commercial Region to assume it. An AWS GovCloud (US) account is then created and associated with the commercial account that you just created. A role is also created in the new AWS GovCloud (US) account that can be assumed by the AWS GovCloud (US) account that is associated with the management account of the commercial organization. For more information and to view a diagram that explains how account access works, see AWS Organizations in the AWS GovCloud User Guide.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account is not automatically collected. This includes a payment method and signing the end user license agreement (EULA). If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the AWS Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" }, "CreateOrganization":{ "name":"CreateOrganization", @@ -140,7 +140,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

Creates an AWS organization. The account whose user is calling the CreateOrganization operation automatically becomes the master account of the new organization.

This operation must be called using credentials from the account that is to become the new organization's master account. The principal must also have the relevant IAM permissions.

By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to CONSOLIDATED_BILLING, no policy types are enabled by default, and you can't use organization policies.

" + "documentation":"

Creates an AWS organization. The account whose user is calling the CreateOrganization operation automatically becomes the management account of the new organization.

This operation must be called using credentials from the account that is to become the new organization's management account. The principal must also have the relevant IAM permissions.

By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to CONSOLIDATED_BILLING, no policy types are enabled by default, and you can't use organization policies.
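
A minimal SDK for Java v2 sketch (not part of the service model), assuming the generated client; the FeatureSet value shown ("ALL") is one of the two values discussed above.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.CreateOrganizationRequest;
import software.amazon.awssdk.services.organizations.model.Organization;

public class CreateOrganizationExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // "ALL" enables organization policies; use "CONSOLIDATED_BILLING"
        // for a billing-only organization as described above.
        Organization org = organizations.createOrganization(CreateOrganizationRequest.builder()
                .featureSet("ALL")
                .build())
                .organization();

        System.out.println("Created organization " + org.id());
    }
}
```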

" }, "CreateOrganizationalUnit":{ "name":"CreateOrganizationalUnit", @@ -161,7 +161,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

For more information about OUs, see Managing Organizational Units in the AWS Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

This operation can be called only from the organization's master account.

" + "documentation":"

Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

For more information about OUs, see Managing Organizational Units in the AWS Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

This operation can be called only from the organization's management account.
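
A short illustrative sketch with the generated client (not part of the service model); the root ID and OU name are placeholders.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.CreateOrganizationalUnitRequest;
import software.amazon.awssdk.services.organizations.model.OrganizationalUnit;

public class CreateOuExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // "r-examplerootid111" is a placeholder root ID; look up the real one with ListRoots.
        OrganizationalUnit ou = organizations.createOrganizationalUnit(
                CreateOrganizationalUnitRequest.builder()
                        .parentId("r-examplerootid111")
                        .name("Workloads")
                        .build())
                .organizationalUnit();

        System.out.println("Created OU " + ou.id());
    }
}
```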

" }, "CreatePolicy":{ "name":"CreatePolicy", @@ -184,7 +184,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual AWS account.

For more information about policies and their use, see Managing Organization Policies.

If the request includes tags, then the requester must have the organizations:TagResource permission.

This operation can be called only from the organization's master account.

" + "documentation":"

Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual AWS account.

For more information about policies and their use, see Managing Organization Policies.

If the request includes tags, then the requester must have the organizations:TagResource permission.

This operation can be called only from the organization's management account.
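
A hedged sketch of creating a service control policy with the generated client (not part of the service model); the policy name, description, and JSON content are illustrative placeholders.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.CreatePolicyRequest;
import software.amazon.awssdk.services.organizations.model.Policy;

public class CreateScpExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // A deny-list style SCP that blocks leaving the organization (illustrative content only).
        String content = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\","
                + "\"Action\":\"organizations:LeaveOrganization\",\"Resource\":\"*\"}]}";

        Policy policy = organizations.createPolicy(CreatePolicyRequest.builder()
                .name("DenyLeaveOrganization")
                .description("Prevents member accounts from leaving the organization")
                .type("SERVICE_CONTROL_POLICY")
                .content(content)
                .build())
                .policy();

        System.out.println("Created policy " + policy.policySummary().id());
    }
}
```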

" }, "DeclineHandshake":{ "name":"DeclineHandshake", @@ -221,7 +221,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes the organization. You can delete an organization only by using credentials from the master account. The organization must be empty of member accounts.

" + "documentation":"

Deletes the organization. You can delete an organization only by using credentials from the management account. The organization must be empty of member accounts.

" }, "DeleteOrganizationalUnit":{ "name":"DeleteOrganizationalUnit", @@ -240,7 +240,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Deletes an organizational unit (OU) from a root or another OU. You must first remove all accounts and child OUs from the OU that you want to delete.

This operation can be called only from the organization's master account.

" + "documentation":"

Deletes an organizational unit (OU) from a root or another OU. You must first remove all accounts and child OUs from the OU that you want to delete.

This operation can be called only from the organization's management account.

" }, "DeletePolicy":{ "name":"DeletePolicy", @@ -260,7 +260,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

This operation can be called only from the organization's master account.

" + "documentation":"

Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

This operation can be called only from the organization's management account.

" }, "DeregisterDelegatedAdministrator":{ "name":"DeregisterDelegatedAdministrator", @@ -281,7 +281,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Removes the specified member AWS account as a delegated administrator for the specified AWS service.

Deregistering a delegated administrator can have unintended impacts on the functionality of the enabled AWS service. See the documentation for the enabled service before you deregister a delegated administrator so that you understand any potential impacts.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" + "documentation":"

Removes the specified member AWS account as a delegated administrator for the specified AWS service.

Deregistering a delegated administrator can have unintended impacts on the functionality of the enabled AWS service. See the documentation for the enabled service before you deregister a delegated administrator so that you understand any potential impacts.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" }, "DescribeAccount":{ "name":"DescribeAccount", @@ -299,7 +299,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Retrieves AWS Organizations-related information about the specified account.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Retrieves AWS Organizations-related information about the specified account.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "DescribeCreateAccountStatus":{ "name":"DescribeCreateAccountStatus", @@ -318,7 +318,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Retrieves the current status of an asynchronous request to create an account.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Retrieves the current status of an asynchronous request to create an account.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "DescribeEffectivePolicy":{ "name":"DescribeEffectivePolicy", @@ -339,7 +339,7 @@ {"shape":"InvalidInputException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account.

This operation applies only to policy types other than service control policies (SCPs).

For more information about policy inheritance, see How Policy Inheritance Works in the AWS Organizations User Guide.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Returns the contents of the effective policy for the specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account.

This operation applies only to policy types other than service control policies (SCPs).

For more information about policy inheritance, see How Policy Inheritance Works in the AWS Organizations User Guide.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.
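
A brief sketch (not part of the service model), assuming the generated client; TAG_POLICY stands in for any non-SCP policy type and the account ID is a placeholder.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.DescribeEffectivePolicyRequest;
import software.amazon.awssdk.services.organizations.model.EffectivePolicy;

public class EffectivePolicyExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // "111122223333" is a placeholder member account ID.
        EffectivePolicy effective = organizations.describeEffectivePolicy(
                DescribeEffectivePolicyRequest.builder()
                        .policyType("TAG_POLICY")
                        .targetId("111122223333")
                        .build())
                .effectivePolicy();

        System.out.println(effective.policyContent());
    }
}
```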

" }, "DescribeHandshake":{ "name":"DescribeHandshake", @@ -391,7 +391,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Retrieves information about an organizational unit (OU).

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Retrieves information about an organizational unit (OU).

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "DescribePolicy":{ "name":"DescribePolicy", @@ -410,7 +410,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Retrieves information about a policy.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Retrieves information about a policy.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "DetachPolicy":{ "name":"DetachPolicy", @@ -433,7 +433,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Detaches a policy from a target root, organizational unit (OU), or account.

If the policy being detached is a service control policy (SCP), the changes to permissions for AWS Identity and Access Management (IAM) users and roles in affected accounts are immediate.

Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

This operation can be called only from the organization's master account.

" + "documentation":"

Detaches a policy from a target root, organizational unit (OU), or account.

If the policy being detached is a service control policy (SCP), the changes to permissions for AWS Identity and Access Management (IAM) users and roles in affected accounts are immediate.

Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

This operation can be called only from the organization's management account.
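
A short sketch with the generated client (not part of the service model); both identifiers are placeholders.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.DetachPolicyRequest;

public class DetachPolicyExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // Placeholder IDs: the policy being detached and the OU it is attached to.
        organizations.detachPolicy(DetachPolicyRequest.builder()
                .policyId("p-examplepolicyid1")
                .targetId("ou-examplerootid111-exampleouid111")
                .build());
    }
}
```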

" }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -452,7 +452,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Disables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from AWS Organizations.

We recommend that you disable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts unless the operations are explicitly permitted by the IAM policies that are attached to your roles.

For more information about integrating other services with AWS Organizations, including the list of services that work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" + "documentation":"

Disables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from AWS Organizations.

We recommend that you disable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts unless the operations are explicitly permitted by the IAM policies that are attached to your roles.

For more information about integrating other services with AWS Organizations, including the list of services that work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" }, "DisablePolicyType":{ "name":"DisablePolicyType", @@ -475,7 +475,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This is an asynchronous request that AWS performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

To view the status of available policy types in the organization, use DescribeOrganization.

" + "documentation":"

Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This is an asynchronous request that AWS performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's management account.

To view the status of available policy types in the organization, use DescribeOrganization.

" }, "EnableAWSServiceAccess":{ "name":"EnableAWSServiceAccess", @@ -494,7 +494,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Enables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

We recommend that you enable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

For more information about enabling services to integrate with AWS Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's master account and only if the organization has enabled all features.

" + "documentation":"

Enables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

We recommend that you enable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

For more information about enabling services to integrate with AWS Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's management account and only if the organization has enabled all features.

" }, "EnableAllFeatures":{ "name":"EnableAllFeatures", @@ -513,7 +513,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that AWS Organizations supports. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

After you enable all features in your organization, the master account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The master account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

This operation can be called only from the organization's master account.

" + "documentation":"

Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that AWS Organizations supports. For more information, see Enabling All Features in Your Organization in the AWS Organizations User Guide.

This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

After you enable all features in your organization, the management account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The management account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

This operation can be called only from the organization's management account.

" }, "EnablePolicyType":{ "name":"EnablePolicyType", @@ -537,7 +537,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

This is an asynchronous request that AWS performs in the background. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.

" + "documentation":"

Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

This is an asynchronous request that AWS performs in the background. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's management account.

You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.
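
A sketch (not part of the service model) that first checks the root's policy type status with ListRoots, as recommended above, and then enables a policy type; it assumes the generated client and a single-root organization.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.EnablePolicyTypeRequest;
import software.amazon.awssdk.services.organizations.model.ListRootsRequest;
import software.amazon.awssdk.services.organizations.model.Root;

public class EnablePolicyTypeExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // Check the per-root policy type status first, as the documentation recommends.
        Root root = organizations.listRoots(ListRootsRequest.builder().build()).roots().get(0);
        System.out.println("Root " + root.id() + " policy types: " + root.policyTypes());

        organizations.enablePolicyType(EnablePolicyTypeRequest.builder()
                .rootId(root.id())
                .policyType("SERVICE_CONTROL_POLICY")
                .build());
    }
}
```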

" }, "InviteAccountToOrganization":{ "name":"InviteAccountToOrganization", @@ -560,7 +560,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Sends an invitation to another account to join your organization as a member account. AWS Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

  • You can invite AWS accounts only from the same seller as the master account. For example, if your organization's master account was created by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and AWS or from any other AWS seller. For more information, see Consolidated Billing in India.

  • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact AWS Support.

If the request includes tags, then the requester must have the organizations:TagResource permission.

This operation can be called only from the organization's master account.

" + "documentation":"

Sends an invitation to another account to join your organization as a member account. AWS Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

  • You can invite AWS accounts only from the same seller as the management account. For example, if your organization's management account was created by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and AWS or from any other AWS seller. For more information, see Consolidated Billing in India.

  • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact AWS Support.

If the request includes tags, then the requester must have the organizations:TagResource permission.

This operation can be called only from the organization's management account.
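
A sketch of sending an invitation with the generated client (not part of the service model); the target account ID and notes are placeholders.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.Handshake;
import software.amazon.awssdk.services.organizations.model.HandshakeParty;
import software.amazon.awssdk.services.organizations.model.InviteAccountToOrganizationRequest;

public class InviteAccountExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // "111122223333" is a placeholder ID of the account being invited.
        Handshake handshake = organizations.inviteAccountToOrganization(
                InviteAccountToOrganizationRequest.builder()
                        .target(HandshakeParty.builder()
                                .id("111122223333")
                                .type("ACCOUNT")
                                .build())
                        .notes("Please join our organization")
                        .build())
                .handshake();

        System.out.println("Sent handshake " + handshake.id());
    }
}
```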

" }, "LeaveOrganization":{ "name":"LeaveOrganization", @@ -579,7 +579,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the master account, use RemoveAccountFromOrganization instead.

This operation can be called only from a member account in the organization.

  • The master account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

  • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

    • Choose a support plan

    • Provide and verify the required contact information

    • Provide a current payment method

    AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" + "documentation":"

Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

This operation can be called only from a member account in the organization.

  • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

  • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

    • Choose a support plan

    • Provide and verify the required contact information

    • Provide a current payment method

    AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see Activating Access to the Billing and Cost Management Console in the AWS Billing and Cost Management User Guide.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" }, "ListAWSServiceAccessForOrganization":{ "name":"ListAWSServiceAccessForOrganization", @@ -598,7 +598,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Returns a list of the AWS services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

For more information about integrating other services with AWS Organizations, including the list of services that currently work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Returns a list of the AWS services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

For more information about integrating other services with AWS Organizations, including the list of services that currently work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListAccounts":{ "name":"ListAccounts", @@ -615,7 +615,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists all the accounts in the organization. To request only the accounts in a specified root or organizational unit (OU), use the ListAccountsForParent operation instead.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists all the accounts in the organization. To request only the accounts in a specified root or organizational unit (OU), use the ListAccountsForParent operation instead.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.
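
A sketch of the NextToken loop described above (not part of the service model), using the generated client; it keeps paging until NextToken comes back null, even when a page returns no accounts.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.Account;
import software.amazon.awssdk.services.organizations.model.ListAccountsRequest;
import software.amazon.awssdk.services.organizations.model.ListAccountsResponse;

public class ListAllAccountsExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        String nextToken = null;
        do {
            ListAccountsResponse page = organizations.listAccounts(ListAccountsRequest.builder()
                    .nextToken(nextToken)
                    .build());
            for (Account account : page.accounts()) {
                System.out.println(account.id() + " " + account.email());
            }
            // Stop only when NextToken is null; an empty page does not mean the results are exhausted.
            nextToken = page.nextToken();
        } while (nextToken != null);
    }
}
```

The SDK also generates a paginator variant (listAccountsPaginator) that performs the equivalent iteration internally.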

" }, "ListAccountsForParent":{ "name":"ListAccountsForParent", @@ -633,7 +633,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists the accounts in an organization that are contained by the specified target root or organizational unit (OU). If you specify the root, you get a list of all the accounts that aren't in any OU. If you specify an OU, you get a list of all the accounts in only that OU and not in any child OUs. To get a list of all accounts in the organization, use the ListAccounts operation.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists the accounts in an organization that are contained by the specified target root or organizational unit (OU). If you specify the root, you get a list of all the accounts that aren't in any OU. If you specify an OU, you get a list of all the accounts in only that OU and not in any child OUs. To get a list of all accounts in the organization, use the ListAccounts operation.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListChildren":{ "name":"ListChildren", @@ -651,7 +651,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists all of the organizational units (OUs) or accounts that are contained in the specified parent OU or root. This operation, along with ListParents enables you to traverse the tree structure that makes up this root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists all of the organizational units (OUs) or accounts that are contained in the specified parent OU or root. This operation, along with ListParents, enables you to traverse the tree structure that makes up this root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListCreateAccountStatus":{ "name":"ListCreateAccountStatus", @@ -669,7 +669,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Lists the account creation requests that match the specified status that is currently being tracked for the organization.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists the account creation requests that are currently being tracked for the organization and that match the specified status.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListDelegatedAdministrators":{ "name":"ListDelegatedAdministrators", @@ -688,7 +688,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Lists the AWS accounts that are designated as delegated administrators in this organization.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists the AWS accounts that are designated as delegated administrators in this organization.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListDelegatedServicesForAccount":{ "name":"ListDelegatedServicesForAccount", @@ -709,7 +709,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

List the AWS services for which the specified account is a delegated administrator.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists the AWS services for which the specified account is a delegated administrator.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListHandshakesForAccount":{ "name":"ListHandshakesForAccount", @@ -744,7 +744,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists the handshakes that are associated with the organization that the requesting user is part of. The ListHandshakesForOrganization operation returns a list of handshake structures. Each structure contains details and status about a handshake.

Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results of this API for only 30 days after changing to that state. After that, they're deleted and no longer accessible.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists the handshakes that are associated with the organization that the requesting user is part of. The ListHandshakesForOrganization operation returns a list of handshake structures. Each structure contains details and status about a handshake.

Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results of this API for only 30 days after changing to that state. After that, they're deleted and no longer accessible.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListOrganizationalUnitsForParent":{ "name":"ListOrganizationalUnitsForParent", @@ -762,7 +762,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists the organizational units (OUs) in a parent organizational unit or root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists the organizational units (OUs) in a parent organizational unit or root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListParents":{ "name":"ListParents", @@ -780,7 +780,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists the root or organizational units (OUs) that serve as the immediate parent of the specified child OU or account. This operation, along with ListChildren enables you to traverse the tree structure that makes up this root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

In the current release, a child can have only a single parent.

" + "documentation":"

Lists the root or organizational units (OUs) that serve as the immediate parent of the specified child OU or account. This operation, along with ListChildren, enables you to traverse the tree structure that makes up this root.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

In the current release, a child can have only a single parent.
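
A short sketch with the generated client (not part of the service model); the child ID is a placeholder account ID, and an OU ID works the same way.

```java
import software.amazon.awssdk.services.organizations.OrganizationsClient;
import software.amazon.awssdk.services.organizations.model.ListParentsRequest;
import software.amazon.awssdk.services.organizations.model.Parent;

public class ListParentsExample {
    public static void main(String[] args) {
        OrganizationsClient organizations = OrganizationsClient.create();

        // "111122223333" is a placeholder ID of a child account.
        for (Parent parent : organizations.listParents(ListParentsRequest.builder()
                .childId("111122223333")
                .build())
                .parents()) {
            System.out.println(parent.typeAsString() + " " + parent.id());
        }
    }
}
```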

" }, "ListPolicies":{ "name":"ListPolicies", @@ -798,7 +798,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Retrieves the list of all policies in an organization of a specified type.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Retrieves the list of all policies in an organization of a specified type.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListPoliciesForTarget":{ "name":"ListPoliciesForTarget", @@ -817,7 +817,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Lists the policies that are directly attached to the specified target root, organizational unit (OU), or account. You must specify the policy type that you want included in the returned list.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists the policies that are directly attached to the specified target root, organizational unit (OU), or account. You must specify the policy type that you want included in the returned list.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListRoots":{ "name":"ListRoots", @@ -834,7 +834,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists the roots that are defined in the current organization.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

Policy types can be enabled and disabled in roots. This is distinct from whether they're available in the organization. When you enable all features, you make policy types available for use in that organization. Individual policy types can then be enabled and disabled in a root. To see the availability of a policy type in an organization, use DescribeOrganization.

" + "documentation":"

Lists the roots that are defined in the current organization.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

Policy types can be enabled and disabled in roots. This is distinct from whether they're available in the organization. When you enable all features, you make policy types available for use in that organization. Individual policy types can then be enabled and disabled in a root. To see the availability of a policy type in an organization, use DescribeOrganization.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -852,7 +852,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists tags that are attached to the specified resource.

You can attach tags to the following resources in AWS Organizations.

  • AWS account

  • Organization root

  • Organizational unit (OU)

  • Policy (any type)

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists tags that are attached to the specified resource.

You can attach tags to the following resources in AWS Organizations.

  • AWS account

  • Organization root

  • Organizational unit (OU)

  • Policy (any type)

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "ListTargetsForPolicy":{ "name":"ListTargetsForPolicy", @@ -871,7 +871,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Lists all the roots, organizational units (OUs), and accounts that the specified policy is attached to.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Lists all the roots, organizational units (OUs), and accounts that the specified policy is attached to.

Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an AWS service.

" }, "MoveAccount":{ "name":"MoveAccount", @@ -892,7 +892,7 @@ {"shape":"AWSOrganizationsNotInUseException"}, {"shape":"ServiceException"} ], - "documentation":"

Moves an account from its current source parent root or organizational unit (OU) to the specified destination parent root or OU.

This operation can be called only from the organization's master account.

" + "documentation":"

Moves an account from its current source parent root or organizational unit (OU) to the specified destination parent root or OU.

This operation can be called only from the organization's management account.

" }, "RegisterDelegatedAdministrator":{ "name":"RegisterDelegatedAdministrator", @@ -913,7 +913,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Enables the specified member account to administer the Organizations features of the specified AWS service. It grants read-only access to AWS Organizations service data. The account still requires IAM permissions to access and administer the AWS service.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" + "documentation":"

Enables the specified member account to administer the Organizations features of the specified AWS service. It grants read-only access to AWS Organizations service data. The account still requires IAM permissions to access and administer the AWS service.

You can run this action only for AWS services that support this feature. For a current list of services that support it, see the column Supports Delegated Administrator in the table at AWS Services that you can use with AWS Organizations in the AWS Organizations User Guide.

This operation can be called only from the organization's management account.

" }, "RemoveAccountFromOrganization":{ "name":"RemoveAccountFromOrganization", @@ -933,7 +933,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes the specified account from the organization.

The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's master account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

This operation can be called only from the organization's master account. Member accounts can remove themselves with LeaveOrganization instead.

  • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must choose a support plan, provide and verify the required contact information, and provide a current payment method. AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. To remove an account that doesn't yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" + "documentation":"

Removes the specified account from the organization.

The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's management account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

This operation can be called only from the organization's management account. Member accounts can remove themselves with LeaveOrganization instead.

  • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must choose a support plan, provide and verify the required contact information, and provide a current payment method. AWS uses the payment method to charge for any billable (not free tier) AWS activity that occurs while the account isn't attached to an organization. To remove an account that doesn't yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. AWS accounts outside of an organization do not support tags.

" }, "TagResource":{ "name":"TagResource", @@ -952,7 +952,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Adds one or more tags to the specified resource.

Currently, you can attach tags to the following resources in AWS Organizations.

  • AWS account

  • Organization root

  • Organizational unit (OU)

  • Policy (any type)

This operation can be called only from the organization's master account.

" + "documentation":"

Adds one or more tags to the specified resource.

Currently, you can attach tags to the following resources in AWS Organizations.

  • AWS account

  • Organization root

  • Organizational unit (OU)

  • Policy (any type)

This operation can be called only from the organization's management account.

" }, "UntagResource":{ "name":"UntagResource", @@ -971,7 +971,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes any tags with the specified keys from the specified resource.

You can attach tags to the following resources in AWS Organizations.

  • AWS account

  • Organization root

  • Organizational unit (OU)

  • Policy (any type)

This operation can be called only from the organization's master account.

" + "documentation":"

Removes any tags with the specified keys from the specified resource.

You can attach tags to the following resources in AWS Organizations.

  • AWS account

  • Organization root

  • Organizational unit (OU)

  • Policy (any type)

This operation can be called only from the organization's management account.

" }, "UpdateOrganizationalUnit":{ "name":"UpdateOrganizationalUnit", @@ -991,7 +991,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Renames the specified organizational unit (OU). The ID and ARN don't change. The child OUs and accounts remain in place, and any attached policies of the OU remain attached.

This operation can be called only from the organization's master account.

" + "documentation":"

Renames the specified organizational unit (OU). The ID and ARN don't change. The child OUs and accounts remain in place, and any attached policies of the OU remain attached.

This operation can be called only from the organization's management account.

" }, "UpdatePolicy":{ "name":"UpdatePolicy", @@ -1015,7 +1015,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

This operation can be called only from the organization's master account.

" + "documentation":"

Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

This operation can be called only from the organization's management account.

" } }, "shapes":{ @@ -1153,7 +1153,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You can't invite an existing account to your organization until you verify that you own the email address associated with the master account. For more information, see Email Address Verification in the AWS Organizations User Guide.

", + "documentation":"

You can't invite an existing account to your organization until you verify that you own the email address associated with the management account. For more information, see Email Address Verification in the AWS Organizations User Guide.

", "exception":true }, "AccountStatus":{ @@ -1226,7 +1226,7 @@ "members":{ "Id":{ "shape":"ChildId", - "documentation":"

The unique identifier (ID) of this child entity.

The regex pattern for a child ID string requires one of the following:

  • Account: A string that consists of exactly 12 digits.

  • Organizational unit (OU): A string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU). This string is followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) of this child entity.

The regex pattern for a child ID string requires one of the following:

  • Account - A string that consists of exactly 12 digits.

  • Organizational unit (OU) - A string that begins with \"ou-\" followed by from 4 to 32 lowercase letters or digits (the ID of the root that contains the OU). This string is followed by a second \"-\" dash and from 8 to 32 additional lowercase letters or digits.

" }, "Type":{ "shape":"ChildType", @@ -1273,7 +1273,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master account from the organization. You can't remove the master account. Instead, after you remove all member accounts, delete the organization itself.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at Removing a member account from your organizationin the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact AWS Support.

  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the master account of the organization as a delegated administrator for an AWS service integrated with Organizations. You can designate only a member account as a delegated administrator.

  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

  • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an AWS account as a delegated administrator for an AWS service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

  • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verfication code.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions in China. To create an organization, the master must have an valid business license. For more information, contact customer support.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

", + "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at Removing a member account from your organization in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact AWS Support.

  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an AWS service integrated with Organizations. You can designate only a member account as a delegated administrator.

  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

  • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an AWS account as a delegated administrator for an AWS service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

  • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions in China. To create an organization, the management account must have a valid business license. For more information, contact customer support.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1340,7 +1340,7 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

(Optional)

The name of an IAM role that AWS Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account.

If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see the following links:

The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" + "documentation":"

(Optional)

The name of an IAM role that AWS Organizations automatically preconfigures in the new member account. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see the following links:

The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", @@ -1383,7 +1383,7 @@ "members":{ "Id":{ "shape":"CreateAccountRequestId", - "documentation":"

The unique identifier (ID) that references this request. You get this value from the response of the initial CreateAccount request to create the account.

The regex pattern for a create account request ID string requires \"car-\" followed by from 8 to 32 lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) that references this request. You get this value from the response of the initial CreateAccount request to create the account.

The regex pattern for a create account request ID string requires \"car-\" followed by from 8 to 32 lowercase letters or digits.

" }, "AccountName":{ "shape":"AccountName", @@ -1411,7 +1411,7 @@ }, "FailureReason":{ "shape":"CreateAccountFailureReason", - "documentation":"

If the request failed, a description of the reason for the failure.

  • ACCOUNT_LIMIT_EXCEEDED: The account could not be created because you have reached the limit on the number of accounts in your organization.

  • CONCURRENT_ACCOUNT_MODIFICATION: You already submitted a request with the same information.

  • EMAIL_ALREADY_EXISTS: The account could not be created because another AWS account with that email address already exists.

  • GOVCLOUD_ACCOUNT_ALREADY_EXISTS: The account in the AWS GovCloud (US) Region could not be created because this Region already includes an account with that email address.

  • INVALID_ADDRESS: The account could not be created because the address you provided is not valid.

  • INVALID_EMAIL: The account could not be created because the email address you provided is not valid.

  • INTERNAL_FAILURE: The account could not be created because of an internal failure. Try again later. If the problem persists, contact Customer Support.

  • MISSING_BUSINESS_VALIDATION: The AWS account that owns your organization has not received Business Validation.

  • MISSING_PAYMENT_INSTRUMENT: You must configure the master account with a valid payment method, such as a credit card.

" + "documentation":"

If the request failed, a description of the reason for the failure.

  • ACCOUNT_LIMIT_EXCEEDED: The account could not be created because you have reached the limit on the number of accounts in your organization.

  • CONCURRENT_ACCOUNT_MODIFICATION: You already submitted a request with the same information.

  • EMAIL_ALREADY_EXISTS: The account could not be created because another AWS account with that email address already exists.

  • GOVCLOUD_ACCOUNT_ALREADY_EXISTS: The account in the AWS GovCloud (US) Region could not be created because this Region already includes an account with that email address.

  • INVALID_ADDRESS: The account could not be created because the address you provided is not valid.

  • INVALID_EMAIL: The account could not be created because the email address you provided is not valid.

  • INTERNAL_FAILURE: The account could not be created because of an internal failure. Try again later. If the problem persists, contact Customer Support.

  • MISSING_BUSINESS_VALIDATION: The AWS account that owns your organization has not received Business Validation.

  • MISSING_PAYMENT_INSTRUMENT: You must configure the management account with a valid payment method, such as a credit card.

" } }, "documentation":"

Contains the status about a CreateAccount or CreateGovCloudAccount request to create an AWS account or an AWS GovCloud (US) account in an organization.

" @@ -1445,7 +1445,7 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

(Optional)

The name of an IAM role that AWS Organizations automatically preconfigures in the new member accounts in both the AWS GovCloud (US) Region and in the commercial Region. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account.

If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the AWS Organizations User Guide and steps 2 and 3 in Tutorial: Delegate Access Across AWS Accounts Using IAM Roles in the IAM User Guide.

The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" + "documentation":"

(Optional)

The name of an IAM role that AWS Organizations automatically preconfigures in the new member accounts in both the AWS GovCloud (US) Region and in the commercial Region. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the AWS Organizations User Guide and steps 2 and 3 in Tutorial: Delegate Access Across AWS Accounts Using IAM Roles in the IAM User Guide.

The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

" }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", @@ -1468,7 +1468,7 @@ "members":{ "FeatureSet":{ "shape":"OrganizationFeatureSet", - "documentation":"

Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

  • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the master account. For more information, see Consolidated billing in the AWS Organizations User Guide.

    The consolidated billing feature subset isn't available for organizations in the AWS GovCloud (US) Region.

  • ALL: In addition to all the features supported by the consolidated billing feature set, the master account can also apply any policy type to any member account in the organization. For more information, see All features in the AWS Organizations User Guide.

" + "documentation":"

Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

  • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the management account. For more information, see Consolidated billing in the AWS Organizations User Guide.

    The consolidated billing feature subset isn't available for organizations in the AWS GovCloud (US) Region.

  • ALL: In addition to all the features supported by the consolidated billing feature set, the management account can also apply any policy type to any member account in the organization. For more information, see All features in the AWS Organizations User Guide.

" } } }, @@ -1534,7 +1534,7 @@ }, "Type":{ "shape":"PolicyType", - "documentation":"

The type of policy to create. You can specify one of the following values:

" + "documentation":"

The type of policy to create. You can specify one of the following values:

" }, "Tags":{ "shape":"Tags", @@ -1692,7 +1692,7 @@ "members":{ "CreateAccountRequestId":{ "shape":"CreateAccountRequestId", - "documentation":"

Specifies the operationId that uniquely identifies the request. You can get the ID from the response to an earlier CreateAccount request, or from the ListCreateAccountStatus operation.

The regex pattern for a create account request ID string requires \"car-\" followed by from 8 to 32 lowercase letters or digits.

" + "documentation":"

Specifies the Id value that uniquely identifies the CreateAccount request. You can get the value from the CreateAccountStatus.Id response in an earlier CreateAccount request, or from the ListCreateAccountStatus operation.

The regex pattern for a create account request ID string requires \"car-\" followed by from 8 to 32 lowercase letters or digits.

" } } }, @@ -1711,11 +1711,11 @@ "members":{ "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" + "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" }, "TargetId":{ "shape":"PolicyTargetId", - "documentation":"

When you're signed in as the master account, specify the ID of the account that you want details about. Specifying an organization root or organizational unit (OU) as the target is not supported.

" + "documentation":"

When you're signed in as the management account, specify the ID of the account that you want details about. Specifying an organization root or organizational unit (OU) as the target is not supported.

" } } }, @@ -1842,7 +1842,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type that you want to disable in this root. You can specify one of the following values:

" + "documentation":"

The policy type that you want to disable in this root. You can specify one of the following values:

" } } }, @@ -1922,7 +1922,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

If you ran this action on the master account, this policy type is not enabled. If you ran the action on a member account, the account doesn't have an effective policy of this type. Contact the administrator of your organization about attaching a policy of this type to the account.

", + "documentation":"

If you ran this action on the management account, this policy type is not enabled. If you ran the action on a member account, the account doesn't have an effective policy of this type. Contact the administrator of your organization about attaching a policy of this type to the account.

", "exception":true }, "EffectivePolicyType":{ @@ -1977,7 +1977,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type that you want to enable. You can specify one of the following values:

" + "documentation":"

The policy type that you want to enable. You can specify one of the following values:

" } } }, @@ -2027,7 +2027,7 @@ "members":{ "Id":{ "shape":"HandshakeId", - "documentation":"

The unique identifier (ID) of a handshake. The originating account creates the ID when it initiates the handshake.

The regex pattern for handshake ID string requires \"h-\" followed by from 8 to 32 lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) of a handshake. The originating account creates the ID when it initiates the handshake.

The regex pattern for handshake ID string requires \"h-\" followed by from 8 to 32 lowercase letters or digits.

" }, "Arn":{ "shape":"HandshakeArn", @@ -2051,14 +2051,14 @@ }, "Action":{ "shape":"ActionType", - "documentation":"

The type of handshake, indicating what action occurs when the recipient accepts the handshake. The following handshake types are supported:

  • INVITE: This type of handshake represents a request to join an organization. It is always sent from the master account to only non-member accounts.

  • ENABLE_ALL_FEATURES: This type of handshake represents a request to enable all features in an organization. It is always sent from the master account to only invited member accounts. Created accounts do not receive this because those accounts were created by the organization's master account and approval is inferred.

  • APPROVE_ALL_FEATURES: This type of handshake is sent from the Organizations service when all member accounts have approved the ENABLE_ALL_FEATURES invitation. It is sent only to the master account and signals the master that it can finalize the process to enable all features.

" + "documentation":"

The type of handshake, indicating what action occurs when the recipient accepts the handshake. The following handshake types are supported:

  • INVITE: This type of handshake represents a request to join an organization. It is always sent from the management account to only non-member accounts.

  • ENABLE_ALL_FEATURES: This type of handshake represents a request to enable all features in an organization. It is always sent from the management account to only invited member accounts. Created accounts do not receive this because those accounts were created by the organization's management account and approval is inferred.

  • APPROVE_ALL_FEATURES: This type of handshake is sent from the Organizations service when all member accounts have approved the ENABLE_ALL_FEATURES invitation. It is sent only to the management account and signals the management account that it can finalize the process to enable all features.

" }, "Resources":{ "shape":"HandshakeResources", "documentation":"

Additional information that is needed to process the handshake.

" } }, - "documentation":"

Contains information that must be exchanged to securely establish a relationship between two accounts (an originator and a recipient). For example, when a master account (the originator) invites another account (the recipient) to join its organization, the two accounts exchange information as a series of handshake requests and responses.

Note: Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists for only 30 days after entering that state After that they are deleted.

" + "documentation":"

Contains information that must be exchanged to securely establish a relationship between two accounts (an originator and a recipient). For example, when a management account (the originator) invites another account (the recipient) to join its organization, the two accounts exchange information as a series of handshake requests and responses.

Note: Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists for only 30 days after entering that state. After that, they are deleted.

" }, "HandshakeAlreadyInStateException":{ "type":"structure", @@ -2103,7 +2103,7 @@ }, "ParentHandshakeId":{ "shape":"HandshakeId", - "documentation":"

Specifies the parent handshake. Only used for handshake types that are a child of another type.

If you specify ParentHandshakeId, you cannot also specify ActionType.

The regex pattern for handshake ID string requires \"h-\" followed by from 8 to 32 lower-case letters or digits.

" + "documentation":"

Specifies the parent handshake. Only used for handshake types that are a child of another type.

If you specify ParentHandshakeId, you cannot also specify ActionType.

The regex pattern for handshake ID string requires \"h-\" followed by from 8 to 32 lowercase letters or digits.

" } }, "documentation":"

Specifies the criteria that are used to select the handshakes for the operation.

" @@ -2140,7 +2140,7 @@ "members":{ "Id":{ "shape":"HandshakePartyId", - "documentation":"

The unique identifier (ID) for the party.

The regex pattern for handshake ID string requires \"h-\" followed by from 8 to 32 lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) for the party.

The regex pattern for handshake ID string requires \"h-\" followed by from 8 to 32 lowercase letters or digits.

" }, "Type":{ "shape":"HandshakePartyType", @@ -2173,7 +2173,7 @@ }, "Type":{ "shape":"HandshakeResourceType", - "documentation":"

The type of information being passed, specifying how the value is to be interpreted by the other party:

  • ACCOUNT - Specifies an AWS account ID number.

  • ORGANIZATION - Specifies an organization ID number.

  • EMAIL - Specifies the email address that is associated with the account that receives the handshake.

  • OWNER_EMAIL - Specifies the email address associated with the master account. Included as information about an organization.

  • OWNER_NAME - Specifies the name associated with the master account. Included as information about an organization.

  • NOTES - Additional text provided by the handshake initiator and intended for the recipient to read.

" + "documentation":"

The type of information being passed, specifying how the value is to be interpreted by the other party:

  • ACCOUNT - Specifies an AWS account ID number.

  • ORGANIZATION - Specifies an organization ID number.

  • EMAIL - Specifies the email address that is associated with the account that receives the handshake.

  • OWNER_EMAIL - Specifies the email address associated with the management account. Included as information about an organization.

  • OWNER_NAME - Specifies the name associated with the management account. Included as information about an organization.

  • NOTES - Additional text provided by the handshake initiator and intended for the recipient to read.

" }, "Resources":{ "shape":"HandshakeResources", @@ -2644,7 +2644,7 @@ }, "Filter":{ "shape":"PolicyType", - "documentation":"

The type of policy that you want to include in the returned list. You must specify one of the following values:

" + "documentation":"

The type of policy that you want to include in the returned list. You must specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -2675,7 +2675,7 @@ "members":{ "Filter":{ "shape":"PolicyType", - "documentation":"

Specifies the type of policy that you want to include in the response. You must specify one of the following values:

" + "documentation":"

Specifies the type of policy that you want to include in the response. You must specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -2797,7 +2797,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You can't remove a master account from an organization. If you want the master account to become a member account in another organization, you must first delete the current organization of the master account.

", + "documentation":"

You can't remove a management account from an organization. If you want the management account to become a member account in another organization, you must first delete the current organization of the management account.

", "exception":true }, "MaxResults":{ @@ -2838,7 +2838,7 @@ "members":{ "Id":{ "shape":"OrganizationId", - "documentation":"

The unique identifier (ID) of an organization.

The regex pattern for an organization ID string requires \"o-\" followed by from 10 to 32 lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) of an organization.

The regex pattern for an organization ID string requires \"o-\" followed by from 10 to 32 lowercase letters or digits.

" }, "Arn":{ "shape":"OrganizationArn", @@ -2850,15 +2850,15 @@ }, "MasterAccountArn":{ "shape":"AccountArn", - "documentation":"

The Amazon Resource Name (ARN) of the account that is designated as the master account for the organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the account that is designated as the management account for the organization.

For more information about ARNs in Organizations, see ARN Formats Supported by Organizations in the AWS Organizations User Guide.

" }, "MasterAccountId":{ "shape":"AccountId", - "documentation":"

The unique identifier (ID) of the master account of an organization.

The regex pattern for an account ID string requires exactly 12 digits.

" + "documentation":"

The unique identifier (ID) of the management account of an organization.

The regex pattern for an account ID string requires exactly 12 digits.

" }, "MasterAccountEmail":{ "shape":"Email", - "documentation":"

The email address that is associated with the AWS account that is designated as the master account for the organization.

" + "documentation":"

The email address that is associated with the AWS account that is designated as the management account for the organization.

" }, "AvailablePolicyTypes":{ "shape":"PolicyTypes", @@ -2887,7 +2887,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

The organization isn't empty. To delete an organization, you must first remove all accounts except the master account, delete all OUs, and delete all policies.

", + "documentation":"

The organization isn't empty. To delete an organization, you must first remove all accounts except the management account, delete all OUs, and delete all policies.

", "exception":true }, "OrganizationalUnit":{ @@ -2895,7 +2895,7 @@ "members":{ "Id":{ "shape":"OrganizationalUnitId", - "documentation":"

The unique identifier (ID) associated with this OU.

The regex pattern for an organizational unit ID string requires \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU). This string is followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) associated with this OU.

The regex pattern for an organizational unit ID string requires \"ou-\" followed by from 4 to 32 lowercase letters or digits (the ID of the root that contains the OU). This string is followed by a second \"-\" dash and from 8 to 32 additional lowercase letters or digits.

" }, "Arn":{ "shape":"OrganizationalUnitArn", @@ -2948,7 +2948,7 @@ "members":{ "Id":{ "shape":"ParentId", - "documentation":"

The unique identifier (ID) of the parent entity.

The regex pattern for a parent ID string requires one of the following:

  • Root: A string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits.

  • Organizational unit (OU): A string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in). This string is followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) of the parent entity.

The regex pattern for a parent ID string requires one of the following:

  • Root - A string that begins with \"r-\" followed by from 4 to 32 lowercase letters or digits.

  • Organizational unit (OU) - A string that begins with \"ou-\" followed by from 4 to 32 lowercase letters or digits (the ID of the root that the OU is in). This string is followed by a second \"-\" dash and from 8 to 32 additional lowercase letters or digits.

" }, "Type":{ "shape":"ParentType", @@ -3062,7 +3062,7 @@ "members":{ "Id":{ "shape":"PolicyId", - "documentation":"

The unique identifier (ID) of the policy.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy.

The regex pattern for a policy ID string requires \"p-\" followed by from 8 to 128 lowercase or uppercase letters, digits, or the underscore character (_).

" }, "Arn":{ "shape":"PolicyArn", @@ -3097,7 +3097,7 @@ "members":{ "TargetId":{ "shape":"PolicyTargetId", - "documentation":"

The unique identifier (ID) of the policy target.

The regex pattern for a target ID string requires one of the following:

  • Root: A string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits.

  • Account: A string that consists of exactly 12 digits.

  • Organizational unit (OU): A string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in). This string is followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) of the policy target.

The regex pattern for a target ID string requires one of the following:

  • Root - A string that begins with \"r-\" followed by from 4 to 32 lowercase letters or digits.

  • Account - A string that consists of exactly 12 digits.

  • Organizational unit (OU) - A string that begins with \"ou-\" followed by from 4 to 32 lowercase letters or digits (the ID of the root that the OU is in). This string is followed by a second \"-\" dash and from 8 to 32 additional lowercase letters or digits.

" }, "Arn":{ "shape":"GenericArn", @@ -3214,7 +3214,7 @@ "members":{ "Id":{ "shape":"RootId", - "documentation":"

The unique identifier (ID) for the root.

The regex pattern for a root ID string requires \"r-\" followed by from 4 to 32 lower-case letters or digits.

" + "documentation":"

The unique identifier (ID) for the root.

The regex pattern for a root ID string requires \"r-\" followed by from 4 to 32 lowercase letters or digits.

" }, "Arn":{ "shape":"RootArn", @@ -3229,7 +3229,7 @@ "documentation":"

The types of policies that are currently enabled for the root and therefore can be attached to the root or to its OUs or accounts.

Even if a policy type is shown as available in the organization, you can separately enable and disable them at the root level by using EnablePolicyType and DisablePolicyType. Use DescribeOrganization to see the availability of the policy types in that organization.

" } }, - "documentation":"

Contains details about a root. A root is a top-level parent node in the hierarchy of an organization that can contain organizational units (OUs) and accounts. Every root contains every AWS account in the organization. Each root enables the accounts to be organized in a different way and to have different policy types enabled for use in that root.

" + "documentation":"

Contains details about a root. A root is a top-level parent node in the hierarchy of an organization that can contain organizational units (OUs) and accounts. The root contains every AWS account in the organization.

" }, "RootArn":{ "type":"string", diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index 9430ccf2d809..6eea3176f989 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/outposts/src/main/resources/codegen-resources/service-2.json b/services/outposts/src/main/resources/codegen-resources/service-2.json index 592e20363153..2c119ca151a9 100644 --- a/services/outposts/src/main/resources/codegen-resources/service-2.json +++ b/services/outposts/src/main/resources/codegen-resources/service-2.json @@ -157,13 +157,20 @@ }, "CreateOutpostInput":{ "type":"structure", - "required":["SiteId"], + "required":[ + "Name", + "SiteId" + ], "members":{ "Name":{"shape":"OutpostName"}, "Description":{"shape":"OutpostDescription"}, "SiteId":{"shape":"SiteId"}, "AvailabilityZone":{"shape":"AvailabilityZone"}, - "AvailabilityZoneId":{"shape":"AvailabilityZoneId"} + "AvailabilityZoneId":{"shape":"AvailabilityZoneId"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags to apply to the Outpost.
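For context, a hedged sketch of populating the new Tags member (now alongside the newly required Name) through the generated v2 client follows; the client, builder, and response accessor names assume the SDK's usual codegen conventions, and the site ID and tag values are placeholders.

    import java.util.Map;
    import software.amazon.awssdk.services.outposts.OutpostsClient;
    import software.amazon.awssdk.services.outposts.model.CreateOutpostRequest;
    import software.amazon.awssdk.services.outposts.model.CreateOutpostResponse;

    public class CreateOutpostExample {
        public static void main(String[] args) {
            try (OutpostsClient outposts = OutpostsClient.create()) {
                CreateOutpostRequest request = CreateOutpostRequest.builder()
                        .name("my-outpost")                  // Name is now required by the model
                        .siteId("os-0ab12cd34ef56gh78")      // placeholder site ID
                        .availabilityZone("us-west-2a")
                        // Up to 50 tags; keys must not start with "aws:" per the TagKey pattern.
                        .tags(Map.of("team", "platform", "env", "prod"))
                        .build();
                CreateOutpostResponse response = outposts.createOutpost(request);
                System.out.println(response.outpost());      // assumed response member
            }
        }
    }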

" + } } }, "CreateOutpostOutput":{ @@ -357,7 +364,11 @@ "Description":{"shape":"OutpostDescription"}, "LifeCycleStatus":{"shape":"LifeCycleStatus"}, "AvailabilityZone":{"shape":"AvailabilityZone"}, - "AvailabilityZoneId":{"shape":"AvailabilityZoneId"} + "AvailabilityZoneId":{"shape":"AvailabilityZoneId"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

The Outpost tags.

" + } }, "documentation":"

Information about an Outpost.

" }, @@ -411,7 +422,11 @@ "SiteId":{"shape":"SiteId"}, "AccountId":{"shape":"AccountId"}, "Name":{"shape":"SiteName"}, - "Description":{"shape":"SiteDescription"} + "Description":{"shape":"SiteDescription"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

The site tags.

" + } }, "documentation":"

Information about a site.

" }, @@ -436,6 +451,24 @@ "min":1, "pattern":"^[\\S ]+$" }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagValue":{ + "type":"string", + "max":256, + "pattern":"^[\\S \\n]+$" + }, "Token":{ "type":"string", "documentation":"

The pagination token.

", diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 8e2bd766ceed..d6c3bbd9aeea 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 2ef36857d28b..b410cd9665bb 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index f09e1b5ff0fb..2bc41f07970a 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json b/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json index 1381a37f42ab..d8a8c3b84a3f 100644 --- a/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json @@ -66,6 +66,22 @@ "max":150 }, "ErrorMessage":{"type":"string"}, + "FilterAttributeName":{ + "type":"string", + "max":50, + "pattern":"[A-Za-z0-9]+" + }, + "FilterAttributeValue":{ + "type":"string", + "max":1000, + "sensitive":true + }, + "FilterValues":{ + "type":"map", + "key":{"shape":"FilterAttributeName"}, + "value":{"shape":"FilterAttributeValue"}, + "max":25 + }, "GetPersonalizedRankingRequest":{ "type":"structure", "required":[ @@ -92,7 +108,11 @@ }, "filterArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of a filter you created to include or exclude items from recommendations for a given user.

" + "documentation":"

The Amazon Resource Name (ARN) of a filter you created to include items or exclude items from recommendations for a given user. For more information, see Filtering Recommendations.

" + }, + "filterValues":{ + "shape":"FilterValues", + "documentation":"

The values to use when filtering recommendations. For each placeholder parameter in your filter expression, provide the parameter name (in matching case) as a key and the filter value(s) as the corresponding value. Separate multiple values for one parameter with a comma.

For filter expressions that use an INCLUDE element to include items, you must provide values for all parameters that are defined in the expression. For filters with expressions that use an EXCLUDE element to exclude items, you can omit the filter-values. In this case, Amazon Personalize doesn't use that portion of the expression to filter recommendations.

For more information, see Filtering Recommendations.

" } } }, @@ -135,7 +155,11 @@ }, "filterArn":{ "shape":"Arn", - "documentation":"

The ARN of the filter to apply to the returned recommendations. For more information, see Using Filters with Amazon Personalize.

When using this parameter, be sure the filter resource is ACTIVE.

" + "documentation":"

The ARN of the filter to apply to the returned recommendations. For more information, see Filtering Recommendations.

When using this parameter, be sure the filter resource is ACTIVE.

" + }, + "filterValues":{ + "shape":"FilterValues", + "documentation":"

The values to use when filtering recommendations. For each placeholder parameter in your filter expression, provide the parameter name (in matching case) as a key and the filter value(s) as the corresponding value. Separate multiple values for one parameter with a comma.

For filter expressions that use an INCLUDE element to include items, you must provide values for all parameters that are defined in the expression. For filters with expressions that use an EXCLUDE element to exclude items, you can omit the filter-values. In this case, Amazon Personalize doesn't use that portion of the expression to filter recommendations.

For more information, see Filtering Recommendations.
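A minimal sketch of passing filter values through the v2 client, assuming the usual codegen mapping of the filterValues map member to a builder method; the ARNs, the GENRE placeholder name, and the quoted values follow the filter-expression conventions described above and are illustrative only.

    import java.util.Map;
    import software.amazon.awssdk.services.personalizeruntime.PersonalizeRuntimeClient;
    import software.amazon.awssdk.services.personalizeruntime.model.GetRecommendationsRequest;
    import software.amazon.awssdk.services.personalizeruntime.model.GetRecommendationsResponse;

    public class FilteredRecommendations {
        public static void main(String[] args) {
            try (PersonalizeRuntimeClient client = PersonalizeRuntimeClient.create()) {
                GetRecommendationsRequest request = GetRecommendationsRequest.builder()
                        .campaignArn("arn:aws:personalize:us-east-1:123456789012:campaign/my-campaign")
                        .userId("user-42")
                        .filterArn("arn:aws:personalize:us-east-1:123456789012:filter/by-genre")
                        // One entry per placeholder parameter in the filter expression;
                        // multiple values for one parameter are comma-separated.
                        .filterValues(Map.of("GENRE", "\"Comedy\",\"Drama\""))
                        .build();
                GetRecommendationsResponse response = client.getRecommendations(request);
                response.itemList().forEach(item -> System.out.println(item.itemId()));
            }
        }
    }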

" } } }, diff --git a/services/pi/pom.xml b/services/pi/pom.xml index 6baf708157a5..c579c8f1bfe3 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index e70ad1f7c35f..3ffc95b74c30 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/customization.config b/services/pinpoint/src/main/resources/codegen-resources/customization.config index 6abaf75b14b1..2c471c083b7f 100644 --- a/services/pinpoint/src/main/resources/codegen-resources/customization.config +++ b/services/pinpoint/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,7 @@ { - "blacklistedSimpleMethods" : ["*"] + "blacklistedSimpleMethods" : ["*"], + "renameShapes": { + // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. + "__EndpointTypesElement": "EndpointTypesElement" + } } diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 1bf7dac494f5..89797921523a 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index bb2a2c91de0b..154f49a3ef85 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/polly/pom.xml b/services/polly/pom.xml index 8d055891aaa0..42fb29fe26e5 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index 314149d34c98..2617de828982 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -478,7 +478,10 @@ }, "documentation":"

Contains metadata describing the lexicon such as the number of lexemes, language code, and so on. For more information, see Managing Lexicons.

" }, - "LexiconContent":{"type":"string"}, + "LexiconContent":{ + "type":"string", + "sensitive":true + }, "LexiconDescription":{ "type":"structure", "members":{ @@ -499,8 +502,7 @@ }, "LexiconName":{ "type":"string", - "pattern":"[0-9A-Za-z]{1,20}", - "sensitive":true + "pattern":"[0-9A-Za-z]{1,20}" }, "LexiconNameList":{ "type":"list", @@ -637,7 +639,7 @@ }, "OutputS3KeyPrefix":{ "type":"string", - "pattern":"^[0-9a-zA-Z\\/\\!\\-_\\.\\*\\'\\(\\)]{0,800}$" + "pattern":"^[0-9a-zA-Z\\/\\!\\-_\\.\\*\\'\\(\\):;\\$@=+\\,\\?&]{0,800}$" }, "OutputUri":{"type":"string"}, "PutLexiconInput":{ @@ -861,7 +863,7 @@ "members":{ "Engine":{ "shape":"Engine", - "documentation":"

Specifies the engine (standard or neural) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.

" + "documentation":"

Specifies the engine (standard or neural) for Amazon Polly to use when processing input text for speech synthesis. For information on Amazon Polly voices and which voices are available in standard-only, NTTS-only, and both standard and NTTS formats, see Available Voices.

NTTS-only voices

When using NTTS-only voices such as Kevin (en-US), this parameter is required and must be set to neural. If the engine is not specified, or is set to standard, this will result in an error.

Type: String

Valid Values: standard | neural

Required: Yes

Standard voices

For standard voices, this is not required; the engine parameter defaults to standard. If the engine is not specified, or is set to standard and an NTTS-only voice is selected, this will result in an error.
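A hedged example of selecting the neural engine for an NTTS-only voice with the v2 Polly client; the voice, output format, and file handling are illustrative, and the enum constants are assumed to mirror the standard/neural values listed above.

    import java.nio.file.Paths;
    import software.amazon.awssdk.core.sync.ResponseTransformer;
    import software.amazon.awssdk.services.polly.PollyClient;
    import software.amazon.awssdk.services.polly.model.Engine;
    import software.amazon.awssdk.services.polly.model.OutputFormat;
    import software.amazon.awssdk.services.polly.model.SynthesizeSpeechRequest;

    public class NeuralSynthesis {
        public static void main(String[] args) {
            try (PollyClient polly = PollyClient.create()) {
                SynthesizeSpeechRequest request = SynthesizeSpeechRequest.builder()
                        .engine(Engine.NEURAL)          // required for NTTS-only voices
                        .voiceId("Kevin")               // an NTTS-only en-US voice per the docs above
                        .outputFormat(OutputFormat.MP3)
                        .text("Hello from the neural engine.")
                        .build();
                // Streaming output: write the synthesized audio to a local file.
                polly.synthesizeSpeech(request, ResponseTransformer.toFile(Paths.get("kevin.mp3")));
            }
        }
    }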

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -1053,6 +1055,7 @@ "Mizuki", "Naja", "Nicole", + "Olivia", "Penelope", "Raveena", "Ricardo", diff --git a/services/pom.xml b/services/pom.xml index 248d4ddc318c..875b03103dce 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT services AWS Java SDK :: Services @@ -253,6 +253,18 @@ timestreamwrite timestreamquery s3outposts + databrew + servicecatalogappregistry + networkfirewall + mwaa + devopsguru + sagemakerfeaturestoreruntime + appintegrations + ecrpublic + amplifybackend + connectcontactlens + lookoutvision + customerprofiles The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index c34bad85edc4..80de0acb84ea 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 pricing diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index b720735298a1..4c05acc2973b 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index f7cda1e22698..c5f81f4d98d9 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index bf39cbb9fcf0..76ed85c9c9fe 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index 045e9b15fd7b..5485e4030ec2 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -184,7 +184,7 @@ {"shape":"ConcurrentUpdatingException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN). This policy will be assigned to specified groups or users of Amazon QuickSight. The users and groups need to be in the same namespace.

" + "documentation":"

Creates an assignment with one specified IAM policy, identified by its Amazon Resource Name (ARN). This policy assignment is attached to the specified groups or users of Amazon QuickSight. Assignment names are unique per AWS account. To avoid overwriting rules in other namespaces, use assignment names that are unique.

" }, "CreateIngestion":{ "name":"CreateIngestion", @@ -977,6 +977,7 @@ {"shape":"IdentityTypeNotSupportedException"}, {"shape":"SessionLifetimeInMinutesInvalidException"}, {"shape":"UnsupportedUserEditionException"}, + {"shape":"UnsupportedPricingPlanException"}, {"shape":"InternalFailureException"} ], "documentation":"

Generates a session URL and authorization code that you can use to embed an Amazon QuickSight read-only dashboard in your web server code. Before you use this command, make sure that you have configured the dashboards and permissions.

Currently, you can use GetDashboardEmbedURL only from the server, not from the user's browser. The following rules apply to the combination of URL and authorization code:

  • They must be used together.

  • They can be used one time only.

  • They are valid for 5 minutes after you run this command.

  • The resulting user session is valid for 10 hours.

For more information, see Embedding Amazon QuickSight in the Amazon QuickSight User Guide .

" @@ -1716,7 +1717,7 @@ {"shape":"ConcurrentUpdatingException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Updates an existing IAM policy assignment. This operation updates only the optional parameter or parameters that are specified in the request.

" + "documentation":"

Updates an existing IAM policy assignment. This operation updates only the optional parameter or parameters that are specified in the request. This overwrites all of the users included in Identities.

" }, "UpdateTemplate":{ "name":"UpdateTemplate", @@ -1931,6 +1932,12 @@ }, "documentation":"

Ad hoc (one-time) filtering option.

" }, + "AdditionalDashboardIdList":{ + "type":"list", + "member":{"shape":"RestrictiveResourceId"}, + "max":20, + "min":1 + }, "AliasName":{ "type":"string", "max":2048, @@ -2346,6 +2353,21 @@ "DATETIME" ] }, + "ColumnDescription":{ + "type":"structure", + "members":{ + "Text":{ + "shape":"ColumnDescriptiveText", + "documentation":"

The text of a description for a column.

" + } + }, + "documentation":"

Metadata that contains a description for a column.

" + }, + "ColumnDescriptiveText":{ + "type":"string", + "max":500, + "min":0 + }, "ColumnGroup":{ "type":"structure", "members":{ @@ -2406,6 +2428,25 @@ "max":64, "min":1 }, + "ColumnLevelPermissionRule":{ + "type":"structure", + "members":{ + "Principals":{ + "shape":"PrincipalList", + "documentation":"

An array of Amazon Resource Names (ARNs) for QuickSight users or groups.

" + }, + "ColumnNames":{ + "shape":"ColumnNameList", + "documentation":"

An array of column names.

" + } + }, + "documentation":"

A rule defined to grant access on one or more restricted columns. Each dataset can have multiple rules. To create a restricted column, you add it to one or more rules. Each rule must contain at least one column and at least one user or group. To be able to see a restricted column, a user or group needs to be added to a rule for that column.
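A sketch of how such a rule might be expressed with the generated model classes, assuming the usual codegen mapping of the Principals and ColumnNames members; the user ARN and column names are placeholders.

    import java.util.List;
    import software.amazon.awssdk.services.quicksight.model.ColumnLevelPermissionRule;

    public class ColumnRuleExample {
        public static void main(String[] args) {
            // At least one principal and at least one column per rule.
            ColumnLevelPermissionRule rule = ColumnLevelPermissionRule.builder()
                    .principals("arn:aws:quicksight:us-east-1:123456789012:user/default/analyst")
                    .columnNames(List.of("salary", "ssn"))
                    .build();
            // The rule would then be supplied to CreateDataSet or UpdateDataSet via
            // the new ColumnLevelPermissionRules member.
            System.out.println(rule);
        }
    }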

" + }, + "ColumnLevelPermissionRuleList":{ + "type":"list", + "member":{"shape":"ColumnLevelPermissionRule"}, + "min":1 + }, "ColumnList":{ "type":"list", "member":{"shape":"ColumnName"}, @@ -2417,6 +2458,11 @@ "max":128, "min":1 }, + "ColumnNameList":{ + "type":"list", + "member":{"shape":"String"}, + "min":1 + }, "ColumnSchema":{ "type":"structure", "members":{ @@ -2446,6 +2492,10 @@ "ColumnGeographicRole":{ "shape":"GeoSpatialDataRole", "documentation":"

A geospatial role for a column.

" + }, + "ColumnDescription":{ + "shape":"ColumnDescription", + "documentation":"

A description for a column.

" } }, "documentation":"

A tag for a column in a TagColumnOperation structure. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.

" @@ -2759,6 +2809,10 @@ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the data that you want to create.

" }, + "ColumnLevelPermissionRules":{ + "shape":"ColumnLevelPermissionRuleList", + "documentation":"

A set of one or more definitions of a ColumnLevelPermissionRule.

" + }, "Tags":{ "shape":"TagList", "documentation":"

Contains a map of the key-value pairs for the resource tag or tags assigned to the dataset.

" @@ -2994,7 +3048,7 @@ }, "AssignmentName":{ "shape":"IAMPolicyAssignmentName", - "documentation":"

The name of the assignment. It must be unique within an AWS account.

" + "documentation":"

The name of the assignment, also called a rule. It must be unique within an AWS account.

" }, "AssignmentStatus":{ "shape":"AssignmentStatus", @@ -3435,7 +3489,7 @@ }, "AlternateDataSourceParameters":{ "shape":"DataSourceParametersList", - "documentation":"

A set of alternate data source parameters that you want to share for these credentials. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API operation compares the DataSourceParameters structure that's in the request with the structures in the AlternateDataSourceParameters allowlist. If the structures are an exact match, the request is allowed to use the new data source with the existing credentials. If the AlternateDataSourceParameters list is null, the DataSourceParameters originally used with these Credentials is automatically allowed.

" + "documentation":"

A set of alternate data source parameters that you want to share for these credentials. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API operation compares the DataSourceParameters structure that's in the request with the structures in the AlternateDataSourceParameters allow list. If the structures are an exact match, the request is allowed to use the new data source with the existing credentials. If the AlternateDataSourceParameters list is null, the DataSourceParameters originally used with these Credentials is automatically allowed.

" } }, "documentation":"

The combination of user name and password that are used as credentials.

" @@ -3770,7 +3824,7 @@ "documentation":"

The hexadecimal code of a color that applies to charts where a lack of data is highlighted.

" } }, - "documentation":"

The theme colors that are used for data colors in charts. The colors description is a hexidecimal color code that consists of six alphanumerical characters, prefixed with #, for example #37BFF5.

" + "documentation":"

The theme colors that are used for data colors in charts. The colors description is a hexadecimal color code that consists of six alphanumerical characters, prefixed with #, for example #37BFF5.

" }, "DataSet":{ "type":"structure", @@ -3822,6 +3876,10 @@ "RowLevelPermissionDataSet":{ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the dataset.

" + }, + "ColumnLevelPermissionRules":{ + "shape":"ColumnLevelPermissionRuleList", + "documentation":"

A set of one or more definitions of a ColumnLevelPermissionRule.

" } }, "documentation":"

Dataset.

" @@ -3929,6 +3987,10 @@ "RowLevelPermissionDataSet":{ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the dataset.

" + }, + "ColumnLevelPermissionRulesApplied":{ + "shape":"Boolean", + "documentation":"

Indicates whether the dataset has column-level permissions configured.

" } }, "documentation":"

Dataset summary.

" @@ -3974,7 +4036,7 @@ }, "AlternateDataSourceParameters":{ "shape":"DataSourceParametersList", - "documentation":"

A set of alternate data source parameters that you want to share for the credentials stored with this data source. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API operation compares the DataSourceParameters structure that's in the request with the structures in the AlternateDataSourceParameters allowlist. If the structures are an exact match, the request is allowed to use the credentials from this existing data source. If the AlternateDataSourceParameters list is null, the Credentials originally used with this DataSourceParameters are automatically allowed.

" + "documentation":"

A set of alternate data source parameters that you want to share for the credentials stored with this data source. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API operation compares the DataSourceParameters structure that's in the request with the structures in the AlternateDataSourceParameters allow list. If the structures are an exact match, the request is allowed to use the credentials from this existing data source. If the AlternateDataSourceParameters list is null, the Credentials originally used with this DataSourceParameters are automatically allowed.

" }, "VpcConnectionProperties":{ "shape":"VpcConnectionProperties", @@ -4072,6 +4134,10 @@ "shape":"MySqlParameters", "documentation":"

MySQL parameters.

" }, + "OracleParameters":{ + "shape":"OracleParameters", + "documentation":"

Oracle parameters.

" + }, "PostgreSqlParameters":{ "shape":"PostgreSqlParameters", "documentation":"

PostgreSQL parameters.

" @@ -4138,6 +4204,7 @@ "JIRA", "MARIADB", "MYSQL", + "ORACLE", "POSTGRESQL", "PRESTO", "REDSHIFT", @@ -5399,7 +5466,7 @@ }, "AssignmentName":{ "shape":"IAMPolicyAssignmentName", - "documentation":"

The name of the assignment.

", + "documentation":"

The name of the assignment, also called a rule.

", "location":"uri", "locationName":"AssignmentName" }, @@ -5878,6 +5945,14 @@ "ENTERPRISE" ] }, + "EmbeddingIdentityType":{ + "type":"string", + "enum":[ + "IAM", + "QUICKSIGHT", + "ANONYMOUS" + ] + }, "EmbeddingUrl":{ "type":"string", "sensitive":true @@ -6016,7 +6091,7 @@ "locationName":"DashboardId" }, "IdentityType":{ - "shape":"IdentityType", + "shape":"EmbeddingIdentityType", "documentation":"

The authentication method that the user uses to sign in.

", "location":"querystring", "locationName":"creds-type" @@ -6028,24 +6103,43 @@ "locationName":"session-lifetime" }, "UndoRedoDisabled":{ - "shape":"boolean", + "shape":"Boolean", "documentation":"

Remove the undo/redo button on the embedded dashboard. The default is FALSE, which enables the undo/redo button.

", "location":"querystring", "locationName":"undo-redo-disabled" }, "ResetDisabled":{ - "shape":"boolean", + "shape":"Boolean", "documentation":"

Remove the reset button on the embedded dashboard. The default is FALSE, which enables the reset button.

", "location":"querystring", "locationName":"reset-disabled" }, + "StatePersistenceEnabled":{ + "shape":"Boolean", + "documentation":"

Adds persistence of state for the user session in an embedded dashboard. Persistence applies to the sheet and the parameter settings. These are control settings that the dashboard subscriber (QuickSight reader) chooses while viewing the dashboard. If this is set to TRUE, the settings are the same when the subscriber reopens the same dashboard URL. The state is stored in QuickSight, not in a browser cookie. If this is set to FALSE, the state of the user session is not persisted. The default is FALSE.

", + "location":"querystring", + "locationName":"state-persistence-enabled" + }, "UserArn":{ "shape":"Arn", "documentation":"

The Amazon QuickSight user's Amazon Resource Name (ARN), for use with QUICKSIGHT identity type. You can use this for any Amazon QuickSight users in your account (readers, authors, or admins) authenticated as one of the following:

  • Active Directory (AD) users or group members

  • Invited nonfederated users

  • IAM users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM federation.

Omit this parameter for users in the third group – IAM users and IAM role-based sessions.

", "location":"querystring", "locationName":"user-arn" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The QuickSight namespace that contains the dashboard IDs in this request. If you're not using a custom namespace, set this to \"default\".

", + "location":"querystring", + "locationName":"namespace" + }, + "AdditionalDashboardIds":{ + "shape":"AdditionalDashboardIdList", + "documentation":"

A list of one or more dashboard IDs that you want to add to a session that includes anonymous authorizations. IdentityType must be set to ANONYMOUS for this to work, because other identity types authenticate as QuickSight users. For example, if you set \"--dashboard-id dash_id1 --dashboard-id dash_id2 dash_id3 identity-type ANONYMOUS\", the session can access all three dashboards.
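Taken together, a hedged sketch of requesting an anonymous embed URL with the new parameters follows; the account ID, namespace, and dashboard IDs are placeholders, and the builder method names assume the standard codegen mapping of the members described above.

    import software.amazon.awssdk.services.quicksight.QuickSightClient;
    import software.amazon.awssdk.services.quicksight.model.GetDashboardEmbedUrlRequest;
    import software.amazon.awssdk.services.quicksight.model.GetDashboardEmbedUrlResponse;

    public class AnonymousEmbedUrl {
        public static void main(String[] args) {
            try (QuickSightClient quickSight = QuickSightClient.create()) {
                GetDashboardEmbedUrlRequest request = GetDashboardEmbedUrlRequest.builder()
                        .awsAccountId("123456789012")
                        .dashboardId("dash-1")
                        .identityType("ANONYMOUS")               // EmbeddingIdentityType value
                        .namespace("default")
                        .statePersistenceEnabled(true)
                        .additionalDashboardIds("dash-2", "dash-3")
                        .sessionLifetimeInMinutes(600L)
                        .build();
                GetDashboardEmbedUrlResponse response = quickSight.getDashboardEmbedUrl(request);
                System.out.println(response.embedUrl());
            }
        }
    }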

", + "location":"querystring", + "locationName":"additional-dashboard-ids" } - } + }, + "documentation":"

Parameter input for the GetDashboardEmbedUrl operation.

" }, "GetDashboardEmbedUrlResponse":{ "type":"structure", @@ -6063,7 +6157,8 @@ "shape":"String", "documentation":"

The AWS request ID for this operation.

" } - } + }, + "documentation":"

Output returned from the GetDashboardEmbedUrl operation.

" }, "GetSessionEmbedUrlRequest":{ "type":"structure", @@ -7876,6 +7971,29 @@ "max":65535, "min":0 }, + "OracleParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{ + "shape":"Host", + "documentation":"

An Oracle host.

" + }, + "Port":{ + "shape":"Port", + "documentation":"

Port.

" + }, + "Database":{ + "shape":"Database", + "documentation":"

Database.

" + } + }, + "documentation":"

Oracle parameters.
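Assuming the usual codegen mapping for the new OracleParameters shape, wiring it into a data source definition might look like the following sketch; the host, port, and database values are placeholders.

    import software.amazon.awssdk.services.quicksight.model.DataSourceParameters;
    import software.amazon.awssdk.services.quicksight.model.OracleParameters;

    public class OracleDataSourceParams {
        public static void main(String[] args) {
            DataSourceParameters params = DataSourceParameters.builder()
                    .oracleParameters(OracleParameters.builder()
                            .host("oracle.example.com")   // placeholder host
                            .port(1521)
                            .database("ORCL")
                            .build())
                    .build();
            // These parameters would be passed to CreateDataSource with Type ORACLE.
            System.out.println(params);
        }
    }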

" + }, "OutputColumn":{ "type":"structure", "members":{ @@ -7883,6 +8001,10 @@ "shape":"ColumnName", "documentation":"

A display name for the dataset.

" }, + "Description":{ + "shape":"ColumnDescriptiveText", + "documentation":"

A description for a column.

" + }, "Type":{ "shape":"ColumnDataType", "documentation":"

Type.

" @@ -8025,6 +8147,12 @@ "max":256, "min":1 }, + "PrincipalList":{ + "type":"list", + "member":{"shape":"String"}, + "max":100, + "min":1 + }, "ProjectOperation":{ "type":"structure", "required":["ProjectedColumns"], @@ -8301,11 +8429,11 @@ "members":{ "Principal":{ "shape":"Principal", - "documentation":"

The Amazon Resource Name (ARN) of the principal. This can be one of the following:

  • The ARN of an Amazon QuickSight user, group, or namespace. (This is most common.)

  • The ARN of an AWS account root: This is an IAM ARN rather than a QuickSight ARN. Use this option only to share resources (templates) across AWS accounts. (This is less common.)

" + "documentation":"

The Amazon Resource Name (ARN) of the principal. This can be one of the following:

  • The ARN of an Amazon QuickSight user or group associated with a data source or dataset. (This is common.)

  • The ARN of an Amazon QuickSight user, group, or namespace associated with an analysis, dashboard, template, or theme. (This is common.)

  • The ARN of an AWS account root: This is an IAM ARN rather than a QuickSight ARN. Use this option only to share resources (templates) across AWS accounts. (This is less common.)

" }, "Actions":{ "shape":"ActionList", - "documentation":"

The IAM action to grant or revoke permissions on, for example \"quicksight:DescribeDashboard\".

" + "documentation":"

The IAM action to grant or revoke permissions on.

" } }, "documentation":"

Permission for the resource.

" @@ -8632,10 +8760,10 @@ }, "Name":{ "shape":"NonEmptyString", - "documentation":"

The name of a sheet. This is displayed on the sheet's tab in the QuickSight console.

" + "documentation":"

The name of a sheet. This name is displayed on the sheet's tab in the QuickSight console.

" } }, - "documentation":"

A sheet is an object that contains a set of visuals that are viewed together on one page in the Amazon QuickSight console. Every analysis and dashboard contains at least one sheet. Each sheet contains at least one visualization widget, for example a chart, pivot table, or narrative insight. Sheets can be associated with other components, such as controls, filters, and so on.

" + "documentation":"

A sheet, which is an object that contains a set of visuals that are viewed together on one page in the Amazon QuickSight console. Every analysis and dashboard contains at least one sheet. Each sheet contains at least one visualization widget, for example a chart, pivot table, or narrative insight. Sheets can be associated with other components, such as controls, filters, and so on.

" }, "SheetControlsOption":{ "type":"structure", @@ -9518,7 +9646,20 @@ "documentation":"

The foreground color that applies to any text or other elements that appear over the measure color.

" } }, - "documentation":"

The theme colors that apply to UI and to charts, excluding data colors. The colors description is a hexidecimal color code that consists of six alphanumerical characters, prefixed with #, for example #37BFF5. For more information, see Using Themes in Amazon QuickSight in the Amazon QuickSight User Guide.

" + "documentation":"

The theme colors that apply to UI and to charts, excluding data colors. The colors description is a hexadecimal color code that consists of six alphanumerical characters, prefixed with #, for example #37BFF5. For more information, see Using Themes in Amazon QuickSight in the Amazon QuickSight User Guide.

" + }, + "UnsupportedPricingPlanException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this request.

" + } + }, + "documentation":"

This error indicates that you are calling an embedding operation in Amazon QuickSight without the required pricing plan on your AWS account. Before you can use anonymous embedding, a QuickSight administrator needs to add capacity pricing to QuickSight. You can do this on the Manage QuickSight page.

After capacity pricing is added, you can enable anonymous embedding by using the GetDashboardEmbedUrl API operation with the --identity-type ANONYMOUS option.

", + "error":{"httpStatusCode":403}, + "exception":true }, "UnsupportedUserEditionException":{ "type":"structure", @@ -10057,6 +10198,10 @@ "RowLevelPermissionDataSet":{ "shape":"RowLevelPermissionDataSet", "documentation":"

The row-level security configuration for the data you want to create.

" + }, + "ColumnLevelPermissionRules":{ + "shape":"ColumnLevelPermissionRuleList", + "documentation":"

A set of one or more definitions of a ColumnLevelPermissionRule.

" } } }, @@ -10269,13 +10414,13 @@ "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the AWS account that contains the IAM policy assignment.

", + "documentation":"

The ID of the AWS account that contains the IAM policy assignment.

", "location":"uri", "locationName":"AwsAccountId" }, "AssignmentName":{ "shape":"IAMPolicyAssignmentName", - "documentation":"

The name of the assignment. This name must be unique within an AWS account.

", + "documentation":"

The name of the assignment, also called a rule. This name must be unique within an AWS account.

", "location":"uri", "locationName":"AssignmentName" }, @@ -10304,7 +10449,7 @@ "members":{ "AssignmentName":{ "shape":"IAMPolicyAssignmentName", - "documentation":"

The name of the assignment.

" + "documentation":"

The name of the assignment or rule.

" }, "AssignmentId":{ "shape":"String", diff --git a/services/ram/pom.xml b/services/ram/pom.xml index d62f48dd3b38..440c8398b93a 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rds/pom.xml b/services/rds/pom.xml index ae28ed9db696..77b0e340da80 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index b300f8cfce0b..f6291423835f 100755 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -219,9 +219,10 @@ {"shape":"DBSnapshotNotFoundFault"}, {"shape":"InvalidDBSnapshotStateFault"}, {"shape":"SnapshotQuotaExceededFault"}, - {"shape":"KMSKeyNotAccessibleFault"} + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"CustomAvailabilityZoneNotFoundFault"} ], - "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the available or storage-optimization state.

You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot action is the destination AWS Region for the DB snapshot copy.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" + "documentation":"

Copies the specified DB snapshot. The source DB snapshot must be in the available state.

You can copy a snapshot from one AWS Region to another. In that case, the AWS Region where you call the CopyDBSnapshot action is the destination AWS Region for the DB snapshot copy.

For more information about copying snapshots, see Copying a DB Snapshot in the Amazon RDS User Guide.

" }, "CopyOptionGroup":{ "name":"CopyOptionGroup", @@ -488,7 +489,7 @@ {"shape":"DBInstanceNotFoundFault"}, {"shape":"SnapshotQuotaExceededFault"} ], - "documentation":"

Creates a DBSnapshot. The source DBInstance must be in \"available\" state.

" + "documentation":"

Creates a snapshot of a DB instance. The source DB instance must be in the available or storage-optimization state.

" }, "CreateDBSubnetGroup":{ "name":"CreateDBSubnetGroup", @@ -3041,6 +3042,10 @@ "OptionGroupName":{ "shape":"String", "documentation":"

The name of an option group to associate with the copy of the snapshot.

Specify this option if you are copying a snapshot from one AWS Region to another, and your DB instance uses a nondefault option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across AWS Regions. For more information, see Option Group Considerations in the Amazon RDS User Guide.

" + }, + "TargetCustomAvailabilityZone":{ + "shape":"String", + "documentation":"

The external custom Availability Zone (CAZ) identifier for the target CAZ.

Example: rds-caz-aiqhTgQv.

" } }, "documentation":"

" @@ -3249,7 +3254,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode.

Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

The parallelquery engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, and version 2.09 and higher 2.x versions.

The global engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, and global engine mode isn't required for any 2.x versions.

The multimaster engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.

For Aurora PostgreSQL, the global engine mode isn't required, and both the parallelquery and the multimaster engine modes currently aren't supported.

Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", @@ -3519,7 +3524,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

You can enable IAM database authentication for the following database engines:

Amazon Aurora

Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

MySQL

  • For MySQL 5.6, minor version 5.6.34 or higher

  • For MySQL 5.7, minor version 5.7.16 or higher

  • For MySQL 8.0, minor version 8.0.16 or higher

PostgreSQL

  • For PostgreSQL 9.5, minor version 9.5.15 or higher

  • For PostgreSQL 9.6, minor version 9.6.11 or higher

  • PostgreSQL 10.6, 10.7, and 10.9

For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

This setting doesn't apply to Amazon Aurora. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -3638,7 +3643,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -3679,6 +3684,10 @@ "ReplicaMode":{ "shape":"ReplicaMode", "documentation":"

The open mode of the replica database: mounted or read-only.

This parameter is only supported for Oracle DB instances.

Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload.

You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

" + }, + "MaxAllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

" } } }, @@ -4240,7 +4249,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode. To check if a DB cluster is part of a global database, use DescribeGlobalClusters instead of checking the EngineMode return value from DescribeDBClusters.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

For more information, see CreateDBCluster.

" }, "ScalingConfigurationInfo":{"shape":"ScalingConfigurationInfo"}, "DeletionProtection":{ @@ -4279,6 +4288,7 @@ "shape":"DomainMembershipList", "documentation":"

The Active Directory Domain membership records associated with the DB cluster.

" }, + "TagList":{"shape":"TagList"}, "GlobalWriteForwardingStatus":{ "shape":"WriteForwardingStatus", "documentation":"

Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it.

" @@ -4812,7 +4822,8 @@ "IAMDatabaseAuthenticationEnabled":{ "shape":"Boolean", "documentation":"

True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

" - } + }, + "TagList":{"shape":"TagList"} }, "documentation":"

Contains the details for an Amazon RDS DB cluster snapshot

This data type is used as a response element in the DescribeDBClusterSnapshots action.

", "wrapper":true @@ -4955,7 +4966,7 @@ }, "SupportedEngineModes":{ "shape":"EngineModeList", - "documentation":"

A list of the supported DB engine modes.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode.

" + "documentation":"

A list of the supported DB engine modes.

" }, "SupportedFeatureNames":{ "shape":"FeatureNameList", @@ -5239,7 +5250,8 @@ "MaxAllocatedStorage":{ "shape":"IntegerOptional", "documentation":"

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

" - } + }, + "TagList":{"shape":"TagList"} }, "documentation":"

Contains the details of an Amazon RDS DB instance.

This data type is used as a response element in the DescribeDBInstances action.

", "wrapper":true @@ -5742,7 +5754,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified proxy name doesn't correspond to a proxy owned by your AWS accoutn in the specified AWS Region.

", + "documentation":"

The specified proxy name doesn't correspond to a proxy owned by your AWS account in the specified AWS Region.

", "error":{ "code":"DBProxyNotFoundFault", "httpStatusCode":404, @@ -6029,7 +6041,7 @@ }, "SnapshotCreateTime":{ "shape":"TStamp", - "documentation":"

Specifies when the snapshot was taken in Coodinated Universal Time (UTC).

" + "documentation":"

Specifies when the snapshot was taken in Coordinated Universal Time (UTC).

" }, "Engine":{ "shape":"String", @@ -6130,7 +6142,8 @@ "DbiResourceId":{ "shape":"String", "documentation":"

The identifier for the source DB instance, which can't be changed and which is unique to an AWS Region.

" - } + }, + "TagList":{"shape":"TagList"} }, "documentation":"

Contains the details of an Amazon RDS DB snapshot.

This data type is used as a response element in the DescribeDBSnapshots action.

", "wrapper":true @@ -9258,7 +9271,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is eanbled for this request.

For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" + "documentation":"

The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request.

For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.

For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions.

" }, "AllowMajorVersionUpgrade":{ "shape":"Boolean", @@ -9334,7 +9347,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

This setting doesn't apply to Amazon Aurora. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -10179,7 +10192,7 @@ }, "SupportedEngineModes":{ "shape":"EngineModeList", - "documentation":"

A list of the supported DB engine modes.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode.

" + "documentation":"

A list of the supported DB engine modes.

" }, "SupportsStorageAutoscaling":{ "shape":"BooleanOptional", @@ -11257,7 +11270,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

For more information, see CreateDBCluster.

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", @@ -11475,7 +11488,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -11640,7 +11653,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "SourceEngine":{ "shape":"String", @@ -11801,7 +11814,7 @@ }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled. For information about the supported DB engines, see CreateDBInstance.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" + "documentation":"

A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

For more information about IAM database authentication, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index acd14acf88ae..40df09f42890 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 0f638e6bd945..d932f47ad414 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshift/src/main/resources/codegen-resources/paginators-1.json b/services/redshift/src/main/resources/codegen-resources/paginators-1.json index b72738fbece6..5257747c4837 100644 --- a/services/redshift/src/main/resources/codegen-resources/paginators-1.json +++ b/services/redshift/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,11 @@ { "pagination": { + "DescribeClusterDbRevisions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "ClusterDbRevisions" + }, "DescribeClusterParameterGroups": { "input_token": "Marker", "limit_key": "MaxRecords", @@ -30,6 +36,12 @@ "output_token": "Marker", "result_key": "ClusterSubnetGroups" }, + "DescribeClusterTracks": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "MaintenanceTracks" + }, "DescribeClusterVersions": { "input_token": "Marker", "limit_key": "MaxRecords", @@ -102,11 +114,41 @@ "output_token": "Marker", "result_key": "ScheduledActions" }, + "DescribeSnapshotCopyGrants": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "SnapshotCopyGrants" + }, + "DescribeSnapshotSchedules": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "SnapshotSchedules" + }, + "DescribeTableRestoreStatus": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "TableRestoreStatusDetails" + }, + "DescribeTags": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "TaggedResources" + }, "DescribeUsageLimits": { "input_token": "Marker", "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "UsageLimits" + }, + "GetReservedNodeExchangeOfferings": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "ReservedNodeOfferings" } } } \ No newline at end of file diff --git a/services/redshift/src/main/resources/codegen-resources/service-2.json b/services/redshift/src/main/resources/codegen-resources/service-2.json index 85461d29a638..12136ef0de2a 100644 --- a/services/redshift/src/main/resources/codegen-resources/service-2.json +++ b/services/redshift/src/main/resources/codegen-resources/service-2.json @@ -2220,6 +2220,10 @@ "ResizeInfo":{ "shape":"ResizeInfo", "documentation":"

Returns the following:

  • AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.

  • ResizeType: Returns ClassicResize

" + }, + "ClusterNamespaceArn":{ + "shape":"String", + "documentation":"

The namespace Amazon Resource Name (ARN) of the cluster.

" } }, "documentation":"

Describes a cluster.
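The paginator definitions added to paginators-1.json earlier in this Redshift diff (DescribeClusterDbRevisions, DescribeClusterTracks, DescribeSnapshotSchedules, DescribeTags, and so on) should cause the generated client to expose auto-paginating variants. A hedged sketch, assuming the SDK's usual operationNamePaginator naming and the ClusterDbRevision accessors:

    import software.amazon.awssdk.services.redshift.RedshiftClient;
    import software.amazon.awssdk.services.redshift.model.DescribeClusterDbRevisionsRequest;

    public class ClusterDbRevisionsPager {
        public static void main(String[] args) {
            try (RedshiftClient redshift = RedshiftClient.create()) {
                // The paginator follows Marker/MaxRecords across pages automatically.
                redshift.describeClusterDbRevisionsPaginator(
                                DescribeClusterDbRevisionsRequest.builder().build())
                        .stream()
                        .flatMap(page -> page.clusterDbRevisions().stream())
                        .forEach(rev -> System.out.println(rev.clusterIdentifier()));
            }
        }
    }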

", diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 1d2ef9e57b4d..84be7ae6ebd2 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index a853b3811bff..fbe041d17fc8 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 1520531737c0..e83fe15b42a4 100644 --- a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -358,6 +358,26 @@ ], "documentation":"

Detects unsafe content in a specified JPEG or PNG format image. Use DetectModerationLabels to moderate images depending on your requirements. For example, you might want to filter images that contain nudity, but not images containing suggestive content.

To filter images, use the labels returned by DetectModerationLabels to determine which types of content are appropriate.

For information about moderation labels, see Detecting Unsafe Content in the Amazon Rekognition Developer Guide.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

" }, + "DetectProtectiveEquipment":{ + "name":"DetectProtectiveEquipment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetectProtectiveEquipmentRequest"}, + "output":{"shape":"DetectProtectiveEquipmentResponse"}, + "errors":[ + {"shape":"InvalidS3ObjectException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ImageTooLargeException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"ThrottlingException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"InvalidImageFormatException"} + ], + "documentation":"

Detects Personal Protective Equipment (PPE) worn by people detected in an image. Amazon Rekognition can detect the following types of PPE.

  • Face cover

  • Hand cover

  • Head cover

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. The image must be either a PNG or JPG formatted file.

DetectProtectiveEquipment detects PPE worn by up to 15 persons detected in an image.

For each person detected in the image, the API returns an array of body parts (face, head, left-hand, right-hand). For each body part, an array of detected items of PPE is returned, including an indicator of whether or not the PPE covers the body part. The API returns the confidence it has in each detection (person, PPE, body part, and body part coverage). It also returns a bounding box (BoundingBox) for each detected person and each detected item of PPE.

You can optionally request a summary of detected PPE items with the SummarizationAttributes input parameter. The summary provides the following information.

  • The persons detected as wearing all of the types of PPE that you specify.

  • The persons detected as not wearing all of the types of PPE that you specify.

  • The persons detected where PPE adornment could not be determined.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectProtectiveEquipment action.
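A minimal sketch of calling this operation from the AWS SDK for Java v2, assuming a placeholder bucket and key; the request, summarization attributes, and nested person/body part/equipment shapes follow the model definitions added in this diff:

import software.amazon.awssdk.services.rekognition.RekognitionClient;
import software.amazon.awssdk.services.rekognition.model.DetectProtectiveEquipmentRequest;
import software.amazon.awssdk.services.rekognition.model.DetectProtectiveEquipmentResponse;
import software.amazon.awssdk.services.rekognition.model.EquipmentDetection;
import software.amazon.awssdk.services.rekognition.model.Image;
import software.amazon.awssdk.services.rekognition.model.ProtectiveEquipmentBodyPart;
import software.amazon.awssdk.services.rekognition.model.ProtectiveEquipmentPerson;
import software.amazon.awssdk.services.rekognition.model.ProtectiveEquipmentSummarizationAttributes;
import software.amazon.awssdk.services.rekognition.model.ProtectiveEquipmentType;
import software.amazon.awssdk.services.rekognition.model.S3Object;

public class DetectPpeSketch {
    public static void main(String[] args) {
        try (RekognitionClient rekognition = RekognitionClient.create()) {
            DetectProtectiveEquipmentRequest request = DetectProtectiveEquipmentRequest.builder()
                    .image(Image.builder()
                            .s3Object(S3Object.builder().bucket("my-bucket").name("worksite.jpg").build())
                            .build())
                    // Optional: request a summary of face and head covers detected with at least 80% confidence.
                    .summarizationAttributes(ProtectiveEquipmentSummarizationAttributes.builder()
                            .minConfidence(80f)
                            .requiredEquipmentTypes(ProtectiveEquipmentType.FACE_COVER,
                                                    ProtectiveEquipmentType.HEAD_COVER)
                            .build())
                    .build();

            DetectProtectiveEquipmentResponse response = rekognition.detectProtectiveEquipment(request);

            // For each detected person, walk the detected body parts and the PPE found on each of them.
            for (ProtectiveEquipmentPerson person : response.persons()) {
                System.out.println("Person " + person.id() + ", confidence " + person.confidence());
                for (ProtectiveEquipmentBodyPart bodyPart : person.bodyParts()) {
                    for (EquipmentDetection equipment : bodyPart.equipmentDetections()) {
                        System.out.printf("  %s on %s, covers body part: %s%n",
                                equipment.type(), bodyPart.name(), equipment.coversBodyPart().value());
                    }
                }
            }
        }
    }
}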

" + }, "DetectText":{ "name":"DetectText", "http":{ @@ -565,7 +585,8 @@ {"shape":"ThrottlingException"}, {"shape":"ProvisionedThroughputExceededException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidImageFormatException"} + {"shape":"InvalidImageFormatException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

To get the number of faces in a collection, call DescribeCollection.

If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image.

If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field.

To determine which version of the model you're using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces.

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageId for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. By default, IndexFaces chooses the quality bar that's used to filter faces. You can also explicitly choose the quality bar. Use QualityFilter to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE.

To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection.

Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed for reasons such as:

  • The number of faces detected exceeds the value of the MaxFaces request parameter.

  • The face is too small compared to the image dimensions.

  • The face is too blurry.

  • The image is too dark.

  • The face has an extreme pose.

  • The face doesn’t have enough detail to be suitable for face search.

In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes:

  • The bounding box, BoundingBox, of the detected face.

  • A confidence value, Confidence, which indicates the confidence that the bounding box contains a face.

  • A face ID, FaceId, assigned by the service for each face that's detected and stored.

  • An image ID, ImageId, assigned by the service for the input image.

If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

This operation requires permissions to perform the rekognition:IndexFaces action.
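A hedged sketch of an IndexFaces call from the AWS SDK for Java v2 that uses the MaxFaces, QualityFilter, and ExternalImageId parameters described above (the collection ID, bucket, and key are placeholders), and that handles the ServiceQuotaExceededException newly modeled for this operation in this diff:

import software.amazon.awssdk.services.rekognition.RekognitionClient;
import software.amazon.awssdk.services.rekognition.model.FaceRecord;
import software.amazon.awssdk.services.rekognition.model.Image;
import software.amazon.awssdk.services.rekognition.model.IndexFacesRequest;
import software.amazon.awssdk.services.rekognition.model.IndexFacesResponse;
import software.amazon.awssdk.services.rekognition.model.QualityFilter;
import software.amazon.awssdk.services.rekognition.model.S3Object;
import software.amazon.awssdk.services.rekognition.model.ServiceQuotaExceededException;
import software.amazon.awssdk.services.rekognition.model.UnindexedFace;

public class IndexFacesSketch {
    public static void main(String[] args) {
        try (RekognitionClient rekognition = RekognitionClient.create()) {
            IndexFacesRequest request = IndexFacesRequest.builder()
                    .collectionId("my-collection")   // placeholder collection ID
                    .image(Image.builder()
                            .s3Object(S3Object.builder().bucket("my-bucket").name("group.jpg").build())
                            .build())
                    .externalImageId("group.jpg")    // client-side key to associate faces with this image
                    .maxFaces(5)                     // only index the 5 largest faces
                    .qualityFilter(QualityFilter.AUTO) // let the service choose the quality bar
                    .build();
            try {
                IndexFacesResponse response = rekognition.indexFaces(request);
                for (FaceRecord record : response.faceRecords()) {
                    System.out.println("Indexed face " + record.face().faceId());
                }
                for (UnindexedFace unindexed : response.unindexedFaces()) {
                    System.out.println("Skipped a face, reasons: " + unindexed.reasons());
                }
            } catch (ServiceQuotaExceededException e) {
                // Newly modeled error for this operation: the collection has reached its size limit.
                System.err.println("Collection is full: " + e.getMessage());
            }
        }
    }
}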

" }, @@ -1024,6 +1045,19 @@ }, "documentation":"

Indicates whether or not the face has a beard, and the confidence level in the determination.

" }, + "BodyPart":{ + "type":"string", + "enum":[ + "FACE", + "HEAD", + "LEFT_HAND", + "RIGHT_HAND" + ] + }, + "BodyParts":{ + "type":"list", + "member":{"shape":"ProtectiveEquipmentBodyPart"} + }, "Boolean":{"type":"boolean"}, "BoundingBox":{ "type":"structure", @@ -1045,7 +1079,7 @@ "documentation":"

Top coordinate of the bounding box as a ratio of overall image height.

" } }, - "documentation":"

Identifies the bounding box around the label, face, or text. The left (x-coordinate) and top (y-coordinate) are coordinates representing the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0).

The top and left values returned are ratios of the overall image size. For example, if the input image is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200).

The width and height values represent the dimensions of the bounding box as a ratio of the overall image dimension. For example, if the input image is 700x200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1.

The bounding box coordinates can have negative values. For example, if Amazon Rekognition is able to detect a face that is at the image edge and is only partially visible, the service can return coordinates that are outside the image bounds and, depending on the image edge, you might get negative values or values greater than 1 for the left or top values.

" + "documentation":"

Identifies the bounding box around the label, face, text, or personal protective equipment. The left (x-coordinate) and top (y-coordinate) are coordinates representing the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0).

The top and left values returned are ratios of the overall image size. For example, if the input image is 700x200 pixels, and the top-left coordinate of the bounding box is 350x50 pixels, the API returns a left value of 0.5 (350/700) and a top value of 0.25 (50/200).

The width and height values represent the dimensions of the bounding box as a ratio of the overall image dimension. For example, if the input image is 700x200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1.

The bounding box coordinates can have negative values. For example, if Amazon Rekognition is able to detect a face that is at the image edge and is only partially visible, the service can return coordinates that are outside the image bounds and, depending on the image edge, you might get negative values or values greater than 1 for the left or top values.
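Because the returned values are ratios, mapping a BoundingBox back to pixel coordinates is a single multiplication per dimension. The helper below is an illustrative sketch (not part of the SDK), using the 700x200 example above:

import software.amazon.awssdk.services.rekognition.model.BoundingBox;

public final class BoundingBoxPixels {
    /**
     * Converts a ratio-based bounding box to pixel values {left, top, width, height}
     * for an image of the given size. For a 700x200 image and left=0.5, top=0.25, width=0.1,
     * this yields left=350px, top=50px, width=70px.
     */
    public static int[] toPixels(BoundingBox box, int imageWidth, int imageHeight) {
        // Ratios can be negative or greater than 1 for partially visible objects,
        // so the resulting pixel coordinates may fall outside the image bounds.
        int left = Math.round(box.left() * imageWidth);
        int top = Math.round(box.top() * imageHeight);
        int width = Math.round(box.width() * imageWidth);
        int height = Math.round(box.height() * imageHeight);
        return new int[] {left, top, width, height};
    }

    private BoundingBoxPixels() {
    }
}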

" }, "BoundingBoxHeight":{ "type":"float", @@ -1311,6 +1345,20 @@ "TIMESTAMP" ] }, + "CoversBodyPart":{ + "type":"structure", + "members":{ + "Confidence":{ + "shape":"Percent", + "documentation":"

The confidence that Amazon Rekognition has in the value of Value.

" + }, + "Value":{ + "shape":"Boolean", + "documentation":"

True if the PPE covers the corresponding body part, otherwise false.

" + } + }, + "documentation":"

Information about an item of Personal Protective Equipment covering a corresponding body part. For more information, see DetectProtectiveEquipment.

" + }, "CreateCollectionRequest":{ "type":"structure", "required":["CollectionId"], @@ -1840,6 +1888,37 @@ } } }, + "DetectProtectiveEquipmentRequest":{ + "type":"structure", + "required":["Image"], + "members":{ + "Image":{ + "shape":"Image", + "documentation":"

The image in which you want to detect PPE on detected persons. The image can be passed as image bytes or you can reference an image stored in an Amazon S3 bucket.

" + }, + "SummarizationAttributes":{ + "shape":"ProtectiveEquipmentSummarizationAttributes", + "documentation":"

An array of PPE types that you want to summarize.

" + } + } + }, + "DetectProtectiveEquipmentResponse":{ + "type":"structure", + "members":{ + "ProtectiveEquipmentModelVersion":{ + "shape":"String", + "documentation":"

The version number of the PPE detection model used to detect PPE in the image.

" + }, + "Persons":{ + "shape":"ProtectiveEquipmentPersons", + "documentation":"

An array of persons detected in the image (including persons not wearing PPE).

" + }, + "Summary":{ + "shape":"ProtectiveEquipmentSummary", + "documentation":"

Summary information for the types of PPE specified in the SummarizationAttributes input parameter.

" + } + } + }, "DetectTextFilters":{ "type":"structure", "members":{ @@ -1928,6 +2007,32 @@ "type":"list", "member":{"shape":"Emotion"} }, + "EquipmentDetection":{ + "type":"structure", + "members":{ + "BoundingBox":{ + "shape":"BoundingBox", + "documentation":"

A bounding box surrounding the item of detected PPE.

" + }, + "Confidence":{ + "shape":"Percent", + "documentation":"

The confidence that Amazon Rekognition has that the bounding box (BoundingBox) contains an item of PPE.

" + }, + "Type":{ + "shape":"ProtectiveEquipmentType", + "documentation":"

The type of detected PPE.

" + }, + "CoversBodyPart":{ + "shape":"CoversBodyPart", + "documentation":"

Information about the body part covered by the detected PPE.

" + } + }, + "documentation":"

Information about an item of Personal Protective Equipment (PPE) detected by DetectProtectiveEquipment. For more information, see DetectProtectiveEquipment.

" + }, + "EquipmentDetections":{ + "type":"list", + "member":{"shape":"EquipmentDetection"} + }, "EvaluationResult":{ "type":"structure", "members":{ @@ -3490,6 +3595,102 @@ "max":100, "min":1 }, + "ProtectiveEquipmentBodyPart":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"BodyPart", + "documentation":"

The detected body part.

" + }, + "Confidence":{ + "shape":"Percent", + "documentation":"

The confidence that Amazon Rekognition has in the detection accuracy of the detected body part.

" + }, + "EquipmentDetections":{ + "shape":"EquipmentDetections", + "documentation":"

An array of Personal Protective Equipment items detected around a body part.

" + } + }, + "documentation":"

Information about a body part detected by DetectProtectiveEquipment that contains PPE. An array of ProtectiveEquipmentBodyPart objects is returned for each person detected by DetectProtectiveEquipment.

" + }, + "ProtectiveEquipmentPerson":{ + "type":"structure", + "members":{ + "BodyParts":{ + "shape":"BodyParts", + "documentation":"

An array of body parts detected on a person's body (including body parts without PPE).

" + }, + "BoundingBox":{ + "shape":"BoundingBox", + "documentation":"

A bounding box around the detected person.

" + }, + "Confidence":{ + "shape":"Percent", + "documentation":"

The confidence that Amazon Rekognition has that the bounding box contains a person.

" + }, + "Id":{ + "shape":"UInteger", + "documentation":"

The identifier for the detected person. The identifier is only unique for a single call to DetectProtectiveEquipment.

" + } + }, + "documentation":"

A person detected by a call to DetectProtectiveEquipment. The API returns all persons detected in the input image in an array of ProtectiveEquipmentPerson objects.

" + }, + "ProtectiveEquipmentPersonIds":{ + "type":"list", + "member":{"shape":"UInteger"} + }, + "ProtectiveEquipmentPersons":{ + "type":"list", + "member":{"shape":"ProtectiveEquipmentPerson"} + }, + "ProtectiveEquipmentSummarizationAttributes":{ + "type":"structure", + "required":[ + "MinConfidence", + "RequiredEquipmentTypes" + ], + "members":{ + "MinConfidence":{ + "shape":"Percent", + "documentation":"

The minimum confidence level for which you want summary information. The confidence level applies to person detection, body part detection, equipment detection, and body part coverage. Amazon Rekognition doesn't return summary information with a confidence lower than this specified value. There isn't a default value.

Specify a MinConfidence value that is between 50-100% as DetectProtectiveEquipment returns predictions only where the detection confidence is between 50% - 100%. If you specify a value that is less than 50%, the results are the same as specifying a value of 50%.

" + }, + "RequiredEquipmentTypes":{ + "shape":"ProtectiveEquipmentTypes", + "documentation":"

An array of personal protective equipment types for which you want summary information. If a person is detected wearing a required equipment type, the person's ID is added to the PersonsWithRequiredEquipment array field returned in ProtectiveEquipmentSummary by DetectProtectiveEquipment.

" + } + }, + "documentation":"

Specifies summary attributes to return from a call to DetectProtectiveEquipment. You can specify which types of PPE to summarize. You can also specify a minimum confidence value for detections. Summary information is returned in the Summary (ProtectiveEquipmentSummary) field of the response from DetectProtectiveEquipment. The summary includes which persons in an image were detected wearing the requested types of personal protective equipment (PPE), which persons were detected as not wearing PPE, and the persons for whom a determination could not be made. For more information, see ProtectiveEquipmentSummary.

" + }, + "ProtectiveEquipmentSummary":{ + "type":"structure", + "members":{ + "PersonsWithRequiredEquipment":{ + "shape":"ProtectiveEquipmentPersonIds", + "documentation":"

An array of IDs for persons who are wearing detected personal protective equipment.

" + }, + "PersonsWithoutRequiredEquipment":{ + "shape":"ProtectiveEquipmentPersonIds", + "documentation":"

An array of IDs for persons who are not wearing all of the types of PPE specified in the RequiredEquipmentTypes field of the detected personal protective equipment.

" + }, + "PersonsIndeterminate":{ + "shape":"ProtectiveEquipmentPersonIds", + "documentation":"

An array of IDs for persons where it was not possible to determine if they are wearing personal protective equipment.

" + } + }, + "documentation":"

Summary information for required items of personal protective equipment (PPE) detected on persons by a call to DetectProtectiveEquipment. You specify the required type of PPE in the SummarizationAttributes (ProtectiveEquipmentSummarizationAttributes) input parameter. The summary includes which persons were detected wearing the required personal protective equipment (PersonsWithRequiredEquipment), which persons were detected as not wearing the required PPE (PersonsWithoutRequiredEquipment), and the persons for whom a determination could not be made (PersonsIndeterminate).

To get a total for each category, use the size of the field array. For example, to find out how many people were detected as wearing the specified PPE, use the size of the PersonsWithRequiredEquipment array. If you want to find out more about a person, such as the location (BoundingBox) of the person on the image, use the person ID in each array element. Each person ID matches the ID field of a ProtectiveEquipmentPerson object returned in the Persons array by DetectProtectiveEquipment.
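An illustrative sketch (reusing a DetectProtectiveEquipmentResponse obtained elsewhere) of computing the per-category totals and looking persons up by ID, as described above:

import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import software.amazon.awssdk.services.rekognition.model.DetectProtectiveEquipmentResponse;
import software.amazon.awssdk.services.rekognition.model.ProtectiveEquipmentPerson;
import software.amazon.awssdk.services.rekognition.model.ProtectiveEquipmentSummary;

public final class PpeSummaryReport {
    public static void print(DetectProtectiveEquipmentResponse response) {
        ProtectiveEquipmentSummary summary = response.summary();

        // Totals per category are simply the sizes of the ID arrays.
        System.out.println("With required PPE:    " + summary.personsWithRequiredEquipment().size());
        System.out.println("Without required PPE: " + summary.personsWithoutRequiredEquipment().size());
        System.out.println("Indeterminate:        " + summary.personsIndeterminate().size());

        // Each ID in the summary refers back to a ProtectiveEquipmentPerson in the Persons array.
        Map<Integer, ProtectiveEquipmentPerson> personsById = response.persons().stream()
                .collect(Collectors.toMap(ProtectiveEquipmentPerson::id, Function.identity()));

        for (Integer personId : summary.personsWithoutRequiredEquipment()) {
            ProtectiveEquipmentPerson person = personsById.get(personId);
            System.out.println("Person " + personId + " without required PPE is at " + person.boundingBox());
        }
    }

    private PpeSummaryReport() {
    }
}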

" + }, + "ProtectiveEquipmentType":{ + "type":"string", + "enum":[ + "FACE_COVER", + "HAND_COVER", + "HEAD_COVER" + ] + }, + "ProtectiveEquipmentTypes":{ + "type":"list", + "member":{"shape":"ProtectiveEquipmentType"} + }, "ProvisionedThroughputExceededException":{ "type":"structure", "members":{ @@ -3817,6 +4018,13 @@ "type":"list", "member":{"shape":"SegmentTypeInfo"} }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The size of the collection exceeds the allowed limit. For more information, see Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.

", + "exception":true + }, "ShotSegment":{ "type":"structure", "members":{ diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index b233d603c795..7adfb7a03a6a 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index 646135f6c1e3..ad2488aba7ac 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index 00061f864ef7..a6a0ad0e9e31 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/robomaker/src/main/resources/codegen-resources/service-2.json b/services/robomaker/src/main/resources/codegen-resources/service-2.json index 9dd3ea9b4c9f..6695a55fbf69 100644 --- a/services/robomaker/src/main/resources/codegen-resources/service-2.json +++ b/services/robomaker/src/main/resources/codegen-resources/service-2.json @@ -1728,7 +1728,7 @@ }, "failureCode":{ "shape":"WorldExportJobErrorCode", - "documentation":"

The failure code of the world export job if it failed:

InternalServiceError

Internal service error.

LimitExceeded

The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed.

ResourceNotFound

The specified resource could not be found.

RequestThrottled

The request was throttled.

InvalidInput

An input parameter in the request is not valid.

" + "documentation":"

The failure code of the world export job if it failed:

InternalServiceError

Internal service error.

LimitExceeded

The requested resource exceeds the maximum number allowed, or the number of concurrent stream requests exceeds the maximum number allowed.

ResourceNotFound

The specified resource could not be found.

RequestThrottled

The request was throttled.

InvalidInput

An input parameter in the request is not valid.

AllWorldGenerationFailed

All of the worlds in the world generation job failed. This can happen if your worldCount is greater than 50 or less than 1.

For more information about troubleshooting WorldForge, see Troubleshooting Simulation WorldForge.
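As an illustrative sketch only: the DescribeWorldExportJob call, the status check, and the failureReason field below are assumptions not shown in this hunk, but inspecting the failure code could look roughly like this:

import software.amazon.awssdk.services.robomaker.RoboMakerClient;
import software.amazon.awssdk.services.robomaker.model.DescribeWorldExportJobRequest;
import software.amazon.awssdk.services.robomaker.model.DescribeWorldExportJobResponse;
import software.amazon.awssdk.services.robomaker.model.WorldExportJobStatus;

public class WorldExportJobStatusCheck {
    public static void main(String[] args) {
        // Placeholder ARN of an existing world export job.
        String jobArn = "arn:aws:robomaker:us-east-1:123456789012:world-export-job/export-EXAMPLE";
        try (RoboMakerClient roboMaker = RoboMakerClient.create()) {
            DescribeWorldExportJobResponse job = roboMaker.describeWorldExportJob(
                    DescribeWorldExportJobRequest.builder().job(jobArn).build());

            if (job.status() == WorldExportJobStatus.FAILED) {
                // failureCode is one of the values documented above; failureReason adds free-form detail.
                System.err.println("Export failed: " + job.failureCode() + " - " + job.failureReason());
            } else {
                System.out.println("Export status: " + job.status());
            }
        }
    }
}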

" }, "clientRequestToken":{ "shape":"ClientRequestToken", @@ -1768,6 +1768,10 @@ "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the world generator job.

" + }, + "worldTags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the generated worlds.

" } } }, @@ -1805,6 +1809,10 @@ "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the world generator job.

" + }, + "worldTags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the generated worlds.

" } } }, @@ -2728,6 +2736,10 @@ "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the world generation job.

" + }, + "worldTags":{ + "shape":"TagMap", + "documentation":"

A map that contains tag keys and tag values that are attached to the generated worlds.

" } } }, diff --git a/services/route53/pom.xml b/services/route53/pom.xml index f23769975d9e..c387eb34c06b 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index f228e232cebe..43c6b74100a1 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index 1847b28f6469..f5c60eaa9d32 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/s3/pom.xml b/services/s3/pom.xml index ce8d0f03fa83..f3704df5115a 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java index 72c4b6c98589..f7aaa37a0981 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3Utilities.java @@ -36,7 +36,8 @@ import software.amazon.awssdk.protocols.core.PathMarshaller; import software.amazon.awssdk.protocols.core.ProtocolUtils; import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.internal.S3EndpointUtils; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverContext; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverFactory; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetUrlRequest; import software.amazon.awssdk.utils.Validate; @@ -156,12 +157,17 @@ public URL getUrl(GetUrlRequest getUrlRequest) { .key(getUrlRequest.key()) .build(); - SdkHttpRequest httpRequest = S3EndpointUtils.applyEndpointConfiguration(marshalledRequest, - getObjectRequest, - resolvedRegion, - s3Configuration, - endpointOverridden) - .sdkHttpRequest(); + S3EndpointResolverContext resolverContext = S3EndpointResolverContext.builder() + .request(marshalledRequest) + .originalRequest(getObjectRequest) + .region(resolvedRegion) + .endpointOverridden(endpointOverridden) + .serviceConfiguration(s3Configuration) + .build(); + + SdkHttpRequest httpRequest = S3EndpointResolverFactory.getEndpointResolver(getObjectRequest.bucket()) + .applyEndpointConfiguration(resolverContext) + .sdkHttpRequest(); try { return httpRequest.getUri().toURL(); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/S3EndpointUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/S3EndpointUtils.java deleted file mode 100644 index 444546f2d1f5..000000000000 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/S3EndpointUtils.java +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.s3.internal; - -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; - -import java.net.URI; -import java.net.URISyntaxException; -import java.util.Arrays; -import java.util.List; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.core.SdkRequest; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.PartitionMetadata; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.regions.RegionMetadata; -import software.amazon.awssdk.services.s3.S3Configuration; -import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointBuilder; -import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource; -import software.amazon.awssdk.services.s3.internal.resource.S3ArnConverter; -import software.amazon.awssdk.services.s3.internal.resource.S3OutpostAccessPointBuilder; -import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; -import software.amazon.awssdk.services.s3.internal.resource.S3Resource; -import software.amazon.awssdk.services.s3.internal.resource.S3ResourceType; -import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; -import software.amazon.awssdk.services.s3.model.ListBucketsRequest; -import software.amazon.awssdk.utils.Validate; - -/** - * Utilities for working with Amazon S3 bucket names, such as validation and - * checked to see if they are compatible with DNS addressing. - */ -@SdkInternalApi -public final class S3EndpointUtils { - - private static final String S3_OUTPOSTS_NAME = "s3-outposts"; - private static final List> ACCELERATE_DISABLED_OPERATIONS = Arrays.asList( - ListBucketsRequest.class, CreateBucketRequest.class, DeleteBucketRequest.class); - - private S3EndpointUtils() { - } - - /** - * Returns a new instance of the given {@link SdkHttpRequest} by applying any endpoint changes based on - * the given {@link S3Configuration} options. 
- */ - public static ConfiguredS3SdkHttpRequest applyEndpointConfiguration(SdkHttpRequest request, - SdkRequest originalRequest, - Region region, - S3Configuration serviceConfiguration, - boolean endpointOverridden) { - String bucketName = originalRequest.getValueForField("Bucket", String.class).orElse(null); - String key = originalRequest.getValueForField("Key", String.class).orElse(null); - - if (bucketName != null && isArn(bucketName)) { - return applyEndpointConfigurationForAccessPointArn(request, region, endpointOverridden, - serviceConfiguration, bucketName, key); - } - - SdkHttpRequest.Builder mutableRequest = request.toBuilder(); - - URI endpoint = resolveEndpoint(request, originalRequest, region, serviceConfiguration); - mutableRequest.uri(endpoint); - - if (serviceConfiguration == null || !serviceConfiguration.pathStyleAccessEnabled()) { - if (bucketName != null) { - if (BucketUtils.isVirtualAddressingCompatibleBucketName(bucketName, false)) { - changeToDnsEndpoint(mutableRequest, bucketName); - } - } - } - - return ConfiguredS3SdkHttpRequest.builder() - .sdkHttpRequest(mutableRequest.build()) - .build(); - } - - private static ConfiguredS3SdkHttpRequest applyEndpointConfigurationForAccessPointArn( - SdkHttpRequest request, - Region region, - boolean endpointOverridden, - S3Configuration serviceConfiguration, - String bucketName, - String key) { - - Arn resourceArn = Arn.fromString(bucketName); - S3Resource s3Resource = S3ArnConverter.create().convertArn(resourceArn); - - if (S3ResourceType.fromValue(s3Resource.type()) != S3ResourceType.ACCESS_POINT) { - throw new IllegalArgumentException("An ARN was passed as a bucket parameter to an S3 operation, " - + "however it does not appear to be a valid S3 access point ARN."); - } - - PartitionMetadata clientPartitionMetadata = PartitionMetadata.of(region); - - String arnRegion = validateConfiguration(region, endpointOverridden, serviceConfiguration, s3Resource); - - S3AccessPointResource s3EndpointResource = - Validate.isInstanceOf(S3AccessPointResource.class, s3Resource, - "An ARN was passed as a bucket parameter to an S3 operation, however it does not " - + "appear to be a valid S3 access point ARN."); - - URI accessPointUri = getUriForAccessPointResource(request, - serviceConfiguration, - arnRegion, - clientPartitionMetadata, - s3EndpointResource, region); - SdkHttpRequest httpRequest = request.toBuilder() - .protocol(accessPointUri.getScheme()) - .host(accessPointUri.getHost()) - .port(accessPointUri.getPort()) - .encodedPath(key) - .build(); - - String signingServiceModification = s3EndpointResource.parentS3Resource() - .filter(r -> r instanceof S3OutpostResource) - .map(ignore -> S3_OUTPOSTS_NAME) - .orElse(null); - - return ConfiguredS3SdkHttpRequest.builder() - .sdkHttpRequest(httpRequest) - .signingRegionModification(Region.of(arnRegion)) - .signingServiceModification(signingServiceModification) - .build(); - } - - private static URI getUriForAccessPointResource(SdkHttpRequest request, - S3Configuration serviceConfiguration, - String arnRegion, - PartitionMetadata clientPartitionMetadata, - S3AccessPointResource s3EndpointResource, - Region region) { - - // DualstackEnabled considered false by default - boolean dualstackEnabled = serviceConfiguration != null && serviceConfiguration.dualstackEnabled(); - boolean fipsRegionProvided = isFipsRegionProvided(region.toString(), arnRegion, - serviceConfiguration != null - && serviceConfiguration.useArnRegionEnabled()); - - String accountId = 
s3EndpointResource.accountId().orElseThrow(() -> new IllegalArgumentException( - "An S3 access point ARN must have an account ID")); - String accessPointName = s3EndpointResource.accessPointName(); - - if (s3EndpointResource.parentS3Resource().filter(r -> r instanceof S3OutpostResource).isPresent()) { - - if (dualstackEnabled) { - throw new IllegalArgumentException("An Outpost Access Point ARN cannot be passed as a bucket parameter to an S3 " - + "operation if the S3 client has been configured with dualstack"); - } - - if (isFipsRegion(region.toString())) { - throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3" - + " operation if the S3 client has been configured with a FIPS" - + " enabled region."); - } - - S3OutpostResource parentResource = (S3OutpostResource) s3EndpointResource.parentS3Resource().get(); - return S3OutpostAccessPointBuilder.create() - .accountId(accountId) - .outpostId(parentResource.outpostId()) - .region(arnRegion) - .accessPointName(accessPointName) - .protocol(request.protocol()) - .domain(clientPartitionMetadata.dnsSuffix()) - .toUri(); - } - return S3AccessPointBuilder.create() - .accessPointName(accessPointName) - .accountId(accountId) - .fipsEnabled(fipsRegionProvided) - .region(removeFipsIfNeeded(arnRegion)) - .protocol(request.protocol()) - .domain(clientPartitionMetadata.dnsSuffix()) - .dualstackEnabled(dualstackEnabled) - .toUri(); - } - - private static String validateConfiguration(Region region, - boolean endpointOverridden, - S3Configuration serviceConfiguration, - S3Resource s3Resource) { - String arnRegion = s3Resource.region().orElseThrow(() -> new IllegalArgumentException( - "An S3 access point ARN must have a region")); - - if (serviceConfiguration != null && serviceConfiguration.accelerateModeEnabled()) { - throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3 " - + "operation if the S3 client has been configured with accelerate mode" - + " enabled."); - } - - if (serviceConfiguration != null && serviceConfiguration.pathStyleAccessEnabled()) { - throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3 " - + "operation if the S3 client has been configured with path style " - + "addressing enabled."); - } - - if (endpointOverridden) { - throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3" - + " operation if the S3 client has been configured with an endpoint " - + "override."); - } - - String trimmedArnRegion = removeFipsIfNeeded(arnRegion); - if (serviceConfiguration == null || !serviceConfiguration.useArnRegionEnabled()) { - if (!removeFipsIfNeeded(region.id()).equals(trimmedArnRegion)) { - throw new IllegalArgumentException( - String.format("The region field of the ARN being passed as a bucket parameter to an S3 operation " - + "does not match the region the client was configured with. To enable this " - + "behavior and prevent this exception set 'useArnRegionEnabled' to true in the " - + "configuration when building the S3 client. 
Provided region: '%s'; client region:" - + " '%s'.", arnRegion, region)); - } - } - - PartitionMetadata clientPartitionMetadata = PartitionMetadata.of(region); - String clientPartition = clientPartitionMetadata.id(); - - if (clientPartition == null || clientPartition.isEmpty() || !s3Resource.partition().isPresent() - || !clientPartition.equals(s3Resource.partition().get())) { - throw new IllegalArgumentException( - String.format("The partition field of the ARN being passed as a bucket parameter to an S3 operation " - + "does not match the partition the S3 client has been configured with. Provided " - + "partition: '%s'; client partition: '%s'.", s3Resource.partition().orElse(""), - clientPartition)); - } - return arnRegion; - } - - private static String removeFipsIfNeeded(String region) { - if (region.startsWith("fips-")) { - return region.replace("fips-", ""); - } - - if (region.endsWith("-fips")) { - return region.replace("-fips", ""); - } - return region; - } - - /** - * Returns whether a FIPS pseudo region is provided. - */ - private static boolean isFipsRegionProvided(String clientRegion, String arnRegion, boolean useArnRegion) { - if (useArnRegion) { - return isFipsRegion(arnRegion); - } - - return isFipsRegion(clientRegion); - } - - /** - * Determine which endpoint to use based on region and {@link S3Configuration}. Will either be a traditional - * S3 endpoint (i.e. s3.us-east-1.amazonaws.com), the global S3 accelerate endpoint (i.e. s3-accelerate.amazonaws.com) or - * a regional dualstack endpoint for IPV6 (i.e. s3.dualstack.us-east-1.amazonaws.com). - */ - private static URI resolveEndpoint(SdkHttpRequest request, - SdkRequest originalRequest, - Region region, - S3Configuration serviceConfiguration) { - - String protocol = request.protocol(); - - RegionMetadata regionMetadata = RegionMetadata.of(region); - - if (isAccelerateEnabled(serviceConfiguration) && isAccelerateSupported(originalRequest)) { - return accelerateEndpoint(serviceConfiguration, regionMetadata, protocol); - } - - if (serviceConfiguration != null && serviceConfiguration.dualstackEnabled()) { - return dualstackEndpoint(regionMetadata, protocol); - } - - return invokeSafely(() -> new URI(request.protocol(), null, request.host(), request.port(), null, null, null)); - } - - /** - * Changes from path style addressing (which the marshallers produce by default, to DNS style or virtual style addressing - * where the bucket name is prepended to the host. DNS style addressing is preferred due to the better load balancing - * qualities it provides, path style is an option mainly for proxy based situations and alternative S3 implementations. - * - * @param mutableRequest Marshalled HTTP request we are modifying. - * @param bucketName Bucket name for this particular operation. - */ - private static void changeToDnsEndpoint(SdkHttpRequest.Builder mutableRequest, String bucketName) { - if (mutableRequest.host().startsWith("s3")) { - String newHost = mutableRequest.host().replaceFirst("s3", bucketName + "." 
+ "s3"); - String newPath = mutableRequest.encodedPath().replaceFirst("/" + bucketName, ""); - - mutableRequest.host(newHost).encodedPath(newPath); - } - } - - /** - * @return dual stack endpoint from given protocol and region metadata - */ - private static URI dualstackEndpoint(RegionMetadata metadata, String protocol) { - String serviceEndpoint = String.format("%s.%s.%s.%s", "s3", "dualstack", metadata.id(), metadata.domain()); - return toUri(protocol, serviceEndpoint); - } - - /** - * @return True if accelerate mode is enabled per {@link S3Configuration}, false if not. - */ - private static boolean isAccelerateEnabled(S3Configuration serviceConfiguration) { - return serviceConfiguration != null && serviceConfiguration.accelerateModeEnabled(); - } - - /** - * @param originalRequest Request object to identify the operation. - * @return True if accelerate is supported for the given operation, false if not. - */ - private static boolean isAccelerateSupported(SdkRequest originalRequest) { - return !ACCELERATE_DISABLED_OPERATIONS.contains(originalRequest.getClass()); - } - - /** - * @return The endpoint for an S3 accelerate enabled operation. S3 accelerate has a single global endpoint. - */ - private static URI accelerateEndpoint(S3Configuration serviceConfiguration, RegionMetadata metadata, String protocol) { - if (serviceConfiguration.dualstackEnabled()) { - return toUri(protocol, "s3-accelerate.dualstack." + metadata.domain()); - } - return toUri(protocol, "s3-accelerate." + metadata.domain()); - } - - private static URI toUri(String protocol, String endpoint) { - try { - return new URI(String.format("%s://%s", protocol, endpoint)); - } catch (URISyntaxException e) { - throw new IllegalArgumentException(e); - } - } - - private static boolean isArn(String s) { - return s.startsWith("arn:"); - } - - private static boolean isFipsRegion(String region) { - return region.startsWith("fips-") || region.endsWith("-fips"); - } -} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolver.java new file mode 100644 index 000000000000..fd6edb22ecde --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolver.java @@ -0,0 +1,212 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isAccelerateEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isArnRegionEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isDualstackEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isFipsRegion; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isFipsRegionProvided; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isPathStyleAccessEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.removeFipsIfNeeded; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.PartitionMetadata; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; +import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointBuilder; +import software.amazon.awssdk.services.s3.internal.resource.S3AccessPointResource; +import software.amazon.awssdk.services.s3.internal.resource.S3ArnConverter; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostAccessPointBuilder; +import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; +import software.amazon.awssdk.services.s3.internal.resource.S3Resource; +import software.amazon.awssdk.services.s3.internal.resource.S3ResourceType; +import software.amazon.awssdk.utils.Validate; + +/** + * Returns a new configured HTTP request with a resolved access point endpoint and signing overrides. 
+ */ +@SdkInternalApi +public final class S3AccessPointEndpointResolver implements S3EndpointResolver { + + private static final String S3_OUTPOSTS_NAME = "s3-outposts"; + + private S3AccessPointEndpointResolver() { + } + + public static S3AccessPointEndpointResolver create() { + return new S3AccessPointEndpointResolver(); + } + + @Override + public ConfiguredS3SdkHttpRequest applyEndpointConfiguration(S3EndpointResolverContext context) { + + S3Resource s3Resource = S3ArnConverter.create().convertArn(Arn.fromString(getBucketName(context))); + if (S3ResourceType.fromValue(s3Resource.type()) != S3ResourceType.ACCESS_POINT) { + throw new IllegalArgumentException("An ARN was passed as a bucket parameter to an S3 operation, " + + "however it does not appear to be a valid S3 access point ARN."); + } + + Region region = context.region(); + PartitionMetadata clientPartitionMetadata = PartitionMetadata.of(region); + + String arnRegion = validateConfiguration(context, s3Resource); + + S3AccessPointResource s3EndpointResource = Validate.isInstanceOf(S3AccessPointResource.class, s3Resource, + "An ARN was passed as a bucket parameter to an S3 operation, however it does not " + + "appear to be a valid S3 access point ARN."); + + URI accessPointUri = getUriForAccessPointResource(context, arnRegion, clientPartitionMetadata, s3EndpointResource); + String key = context.originalRequest().getValueForField("Key", String.class).orElse(null); + SdkHttpRequest httpRequest = context.request().toBuilder() + .protocol(accessPointUri.getScheme()) + .host(accessPointUri.getHost()) + .port(accessPointUri.getPort()) + .encodedPath(key) + .build(); + + String signingServiceModification = s3EndpointResource.parentS3Resource() + .filter(r -> r instanceof S3OutpostResource) + .map(ignore -> S3_OUTPOSTS_NAME) + .orElse(null); + + return ConfiguredS3SdkHttpRequest.builder() + .sdkHttpRequest(httpRequest) + .signingRegionModification(Region.of(arnRegion)) + .signingServiceModification(signingServiceModification) + .build(); + } + + private String validateConfiguration(S3EndpointResolverContext context, S3Resource s3Resource) { + Region region = context.region(); + String arnRegion = s3Resource.region().orElseThrow(() -> new IllegalArgumentException( + "An S3 access point ARN must have a region")); + + + S3Configuration serviceConfiguration = context.serviceConfiguration(); + if (isAccelerateEnabled(serviceConfiguration)) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3 " + + "operation if the S3 client has been configured with accelerate mode" + + " enabled."); + } + + if (isPathStyleAccessEnabled(serviceConfiguration)) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3 " + + "operation if the S3 client has been configured with path style " + + "addressing enabled."); + } + + if (context.endpointOverridden()) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3" + + " operation if the S3 client has been configured with an endpoint " + + "override."); + } + + if (!isArnRegionEnabled(serviceConfiguration) && clientRegionDiffersFromArnRegion(region, arnRegion)) { + throw new IllegalArgumentException( + String.format("The region field of the ARN being passed as a bucket parameter to an S3 operation " + + "does not match the region the client was configured with. 
To enable this " + + "behavior and prevent this exception set 'useArnRegionEnabled' to true in the " + + "configuration when building the S3 client. Provided region: '%s'; client region:" + + " '%s'.", arnRegion, region)); + } + + String clientPartition = PartitionMetadata.of(region).id(); + + if (illegalPartitionConfiguration(s3Resource, clientPartition)) { + throw new IllegalArgumentException( + String.format("The partition field of the ARN being passed as a bucket parameter to an S3 operation " + + "does not match the partition the S3 client has been configured with. Provided " + + "partition: '%s'; client partition: '%s'.", s3Resource.partition().orElse(""), + clientPartition)); + } + return arnRegion; + } + + private boolean clientRegionDiffersFromArnRegion(Region clientRegion, String arnRegion) { + return !removeFipsIfNeeded(clientRegion.id()).equals(removeFipsIfNeeded(arnRegion)); + } + + private boolean illegalPartitionConfiguration(S3Resource s3Resource, String clientPartition) { + return clientPartition == null || clientPartition.isEmpty() || !s3Resource.partition().isPresent() + || !clientPartition.equals(s3Resource.partition().get()); + } + + private String getBucketName(S3EndpointResolverContext context) { + return context.originalRequest().getValueForField("Bucket", String.class).orElseThrow( + () -> new IllegalArgumentException("Bucket name cannot be empty when parsing access points.")); + } + + private URI getUriForAccessPointResource(S3EndpointResolverContext context, String arnRegion, + PartitionMetadata clientPartitionMetadata, + S3AccessPointResource s3EndpointResource) { + + boolean dualstackEnabled = isDualstackEnabled(context.serviceConfiguration()); + boolean fipsRegionProvided = isFipsRegionProvided(context.region().toString(), arnRegion, + isArnRegionEnabled(context.serviceConfiguration())); + + String accountId = s3EndpointResource.accountId().orElseThrow(() -> new IllegalArgumentException( + "An S3 access point ARN must have an account ID")); + String accessPointName = s3EndpointResource.accessPointName(); + + if (isOutpostAccessPoint(s3EndpointResource)) { + return getOutpostAccessPointUri(context, arnRegion, clientPartitionMetadata, s3EndpointResource); + } + + return S3AccessPointBuilder.create() + .accessPointName(accessPointName) + .accountId(accountId) + .fipsEnabled(fipsRegionProvided) + .region(removeFipsIfNeeded(arnRegion)) + .protocol(context.request().protocol()) + .domain(clientPartitionMetadata.dnsSuffix()) + .dualstackEnabled(dualstackEnabled) + .toUri(); + } + + private boolean isOutpostAccessPoint(S3AccessPointResource s3EndpointResource) { + return s3EndpointResource.parentS3Resource().filter(r -> r instanceof S3OutpostResource).isPresent(); + } + + private URI getOutpostAccessPointUri(S3EndpointResolverContext context, String arnRegion, + PartitionMetadata clientPartitionMetadata, S3AccessPointResource s3EndpointResource) { + if (isDualstackEnabled(context.serviceConfiguration())) { + throw new IllegalArgumentException("An Outpost Access Point ARN cannot be passed as a bucket parameter to an S3 " + + "operation if the S3 client has been configured with dualstack"); + } + + if (isFipsRegion(context.region().toString())) { + throw new IllegalArgumentException("An access point ARN cannot be passed as a bucket parameter to an S3" + + " operation if the S3 client has been configured with a FIPS" + + " enabled region."); + } + + S3OutpostResource parentResource = (S3OutpostResource) s3EndpointResource.parentS3Resource().get(); + return 
S3OutpostAccessPointBuilder.create() + .accountId(s3EndpointResource.accountId().get()) + .outpostId(parentResource.outpostId()) + .region(arnRegion) + .accessPointName(s3EndpointResource.accessPointName()) + .protocol(context.request().protocol()) + .domain(clientPartitionMetadata.dnsSuffix()) + .toUri(); + } + +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolver.java new file mode 100644 index 000000000000..a3284baf7782 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolver.java @@ -0,0 +1,106 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.accelerateEndpoint; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.dualstackEndpoint; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isAccelerateEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isAccelerateSupported; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isDualstackEnabled; +import static software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointUtils.isPathStyleAccessEnabled; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.RegionMetadata; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.BucketUtils; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; + +/** + * Returns a new configured HTTP request with a resolved endpoint with either virtual addressing or path style access. + * Supports accelerate and dual stack. 
+ */ +@SdkInternalApi +public final class S3BucketEndpointResolver implements S3EndpointResolver { + + private S3BucketEndpointResolver() { + } + + public static S3BucketEndpointResolver create() { + return new S3BucketEndpointResolver(); + } + + @Override + public ConfiguredS3SdkHttpRequest applyEndpointConfiguration(S3EndpointResolverContext context) { + URI endpoint = resolveEndpoint(context); + SdkHttpRequest.Builder mutableRequest = context.request().toBuilder(); + mutableRequest.uri(endpoint); + + String bucketName = context.originalRequest().getValueForField("Bucket", String.class).orElse(null); + if (canUseVirtualAddressing(context.serviceConfiguration(), bucketName)) { + changeToDnsEndpoint(mutableRequest, bucketName); + } + + return ConfiguredS3SdkHttpRequest.builder() + .sdkHttpRequest(mutableRequest.build()) + .build(); + } + + /** + * Determine which endpoint to use based on region and {@link S3Configuration}. Will either be a traditional + * S3 endpoint (i.e. s3.us-east-1.amazonaws.com), the global S3 accelerate endpoint (i.e. s3-accelerate.amazonaws.com) or + * a regional dualstack endpoint for IPV6 (i.e. s3.dualstack.us-east-1.amazonaws.com). + */ + private static URI resolveEndpoint(S3EndpointResolverContext context) { + SdkHttpRequest request = context.request(); + String protocol = request.protocol(); + RegionMetadata regionMetadata = RegionMetadata.of(context.region()); + S3Configuration serviceConfiguration = context.serviceConfiguration(); + + if (isAccelerateEnabled(serviceConfiguration) && isAccelerateSupported(context.originalRequest())) { + return accelerateEndpoint(serviceConfiguration, regionMetadata.domain(), protocol); + } + + if (isDualstackEnabled(serviceConfiguration)) { + return dualstackEndpoint(regionMetadata.id(), regionMetadata.domain(), protocol); + } + + return invokeSafely(() -> new URI(protocol, null, request.host(), request.port(), null, null, null)); + } + + private static boolean canUseVirtualAddressing(S3Configuration serviceConfiguration, String bucketName) { + return !isPathStyleAccessEnabled(serviceConfiguration) && bucketName != null && + BucketUtils.isVirtualAddressingCompatibleBucketName(bucketName, false); + } + + /** + * Changes from path style addressing (which the marshallers produce by default), to DNS style/virtual style addressing, + * where the bucket name is prepended to the host. DNS style addressing is preferred due to the better load balancing + * qualities it provides; path style is an option mainly for proxy based situations and alternative S3 implementations. + * + * @param mutableRequest Marshalled HTTP request we are modifying. + * @param bucketName Bucket name for this particular operation. + */ + private static void changeToDnsEndpoint(SdkHttpRequest.Builder mutableRequest, String bucketName) { + if (mutableRequest.host().startsWith("s3")) { + String newHost = mutableRequest.host().replaceFirst("s3", bucketName + "." + "s3"); + String newPath = mutableRequest.encodedPath().replaceFirst("/" + bucketName, ""); + mutableRequest.host(newHost).encodedPath(newPath); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolver.java new file mode 100644 index 000000000000..9f8ab6e66910 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolver.java @@ -0,0 +1,31 @@ +/* + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; + +/** + * An S3 endpoint resolver returns a {@link ConfiguredS3SdkHttpRequest} based on the HTTP context and previously + * set execution attributes. + *

+ * @see software.amazon.awssdk.services.s3.internal.handlers.EndpointAddressInterceptor + */ +@SdkInternalApi +public interface S3EndpointResolver { + + ConfiguredS3SdkHttpRequest applyEndpointConfiguration(S3EndpointResolverContext context); +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContext.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContext.java new file mode 100644 index 000000000000..48cc41a9f37c --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContext.java @@ -0,0 +1,143 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; + +/** + * Contains the information needed to resolve S3 endpoints. + */ +@SdkInternalApi +public final class S3EndpointResolverContext { + private final SdkHttpRequest request; + private final SdkRequest originalRequest; + private final Region region; + private final S3Configuration serviceConfiguration; + private final boolean endpointOverridden; + + private S3EndpointResolverContext(Builder builder) { + this.request = builder.request; + this.originalRequest = builder.originalRequest; + this.region = builder.region; + this.serviceConfiguration = builder.serviceConfiguration; + this.endpointOverridden = builder.endpointOverridden; + } + + public static Builder builder() { + return new Builder(); + } + + public SdkHttpRequest request() { + return request; + } + + public SdkRequest originalRequest() { + return originalRequest; + } + + public Region region() { + return region; + } + + public S3Configuration serviceConfiguration() { + return serviceConfiguration; + } + + public boolean endpointOverridden() { + return endpointOverridden; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + S3EndpointResolverContext that = (S3EndpointResolverContext) o; + return endpointOverridden == that.endpointOverridden && + Objects.equals(request, that.request) && + Objects.equals(originalRequest, that.originalRequest) && + Objects.equals(region, that.region) && + Objects.equals(serviceConfiguration, that.serviceConfiguration); + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + Objects.hashCode(request()); + hashCode = 31 * hashCode + Objects.hashCode(originalRequest()); + hashCode = 31 * hashCode + Objects.hashCode(region()); + hashCode = 31 * hashCode + Objects.hashCode(serviceConfiguration()); + hashCode = 31 * hashCode + 
Objects.hashCode(endpointOverridden()); + return hashCode; + } + + public Builder toBuilder() { + return builder().endpointOverridden(endpointOverridden) + .request(request) + .originalRequest(originalRequest) + .region(region) + .serviceConfiguration(serviceConfiguration); + } + + public static final class Builder { + private SdkHttpRequest request; + private SdkRequest originalRequest; + private Region region; + private S3Configuration serviceConfiguration; + private boolean endpointOverridden; + + private Builder() { + } + + public Builder request(SdkHttpRequest request) { + this.request = request; + return this; + } + + public Builder originalRequest(SdkRequest originalRequest) { + this.originalRequest = originalRequest; + return this; + } + + public Builder region(Region region) { + this.region = region; + return this; + } + + public Builder serviceConfiguration(S3Configuration serviceConfiguration) { + this.serviceConfiguration = serviceConfiguration; + return this; + } + + public Builder endpointOverridden(boolean endpointOverridden) { + this.endpointOverridden = endpointOverridden; + return this; + } + + public S3EndpointResolverContext build() { + return new S3EndpointResolverContext(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactory.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactory.java new file mode 100644 index 000000000000..5db130d41c3c --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactory.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Get endpoint resolver. + */ +@SdkInternalApi +public final class S3EndpointResolverFactory { + + private static final S3EndpointResolver ACCESS_POINT_ENDPOINT_RESOLVER = S3AccessPointEndpointResolver.create(); + private static final S3EndpointResolver BUCKET_ENDPOINT_RESOLVER = S3BucketEndpointResolver.create(); + + private S3EndpointResolverFactory() { + } + + public static S3EndpointResolver getEndpointResolver(String bucketName) { + if (bucketName != null && S3EndpointUtils.isArn(bucketName)) { + return ACCESS_POINT_ENDPOINT_RESOLVER; + } + return BUCKET_ENDPOINT_RESOLVER; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtils.java new file mode 100644 index 000000000000..d3176b734b0e --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtils.java @@ -0,0 +1,146 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.ListBucketsRequest; + +/** + * Utilities for working with Amazon S3 bucket names and endpoints. + */ +@SdkInternalApi +public final class S3EndpointUtils { + + private static final List> ACCELERATE_DISABLED_OPERATIONS = Arrays.asList( + ListBucketsRequest.class, CreateBucketRequest.class, DeleteBucketRequest.class); + + private S3EndpointUtils() { + } + + public static String removeFipsIfNeeded(String region) { + if (region.startsWith("fips-")) { + return region.replace("fips-", ""); + } + + if (region.endsWith("-fips")) { + return region.replace("-fips", ""); + } + return region; + } + + /** + * Returns whether a FIPS pseudo region is provided. + */ + public static boolean isFipsRegionProvided(String clientRegion, String arnRegion, boolean useArnRegion) { + if (useArnRegion) { + return isFipsRegion(arnRegion); + } + return isFipsRegion(clientRegion); + } + + public static boolean isFipsRegion(String region) { + return region.startsWith("fips-") || region.endsWith("-fips"); + } + + /** + * @return True if accelerate mode is enabled per {@link S3Configuration}, false if not. + */ + public static boolean isAccelerateEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.accelerateModeEnabled(); + } + + /** + * @param originalRequest Request object to identify the operation. + * @return True if accelerate is supported for the given operation, false if not. + */ + public static boolean isAccelerateSupported(SdkRequest originalRequest) { + return !ACCELERATE_DISABLED_OPERATIONS.contains(originalRequest.getClass()); + } + + /** + * @return The endpoint for an S3 accelerate enabled operation. S3 accelerate has a single global endpoint. + */ + public static URI accelerateEndpoint(S3Configuration serviceConfiguration, String domain, String protocol) { + if (serviceConfiguration.dualstackEnabled()) { + return toUri(protocol, "s3-accelerate.dualstack." + domain); + } + return toUri(protocol, "s3-accelerate." + domain); + } + + /** + * @return True if dualstack is enabled per {@link S3Configuration}, false if not. 
+ */ + public static boolean isDualstackEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.dualstackEnabled(); + } + + /** + * @return dual stack endpoint from given protocol and region metadata + */ + public static URI dualstackEndpoint(String id, String domain, String protocol) { + String serviceEndpoint = String.format("%s.%s.%s.%s", "s3", "dualstack", id, domain); + return toUri(protocol, serviceEndpoint); + } + + /** + * @return True if path style access is enabled per {@link S3Configuration}, false if not. + */ + public static boolean isPathStyleAccessEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.pathStyleAccessEnabled(); + } + + public static boolean isArnRegionEnabled(S3Configuration serviceConfiguration) { + return serviceConfiguration != null && serviceConfiguration.useArnRegionEnabled(); + } + + /** + * Changes from path style addressing (which the marshallers produce by default, to DNS style or virtual style addressing + * where the bucket name is prepended to the host. DNS style addressing is preferred due to the better load balancing + * qualities it provides, path style is an option mainly for proxy based situations and alternative S3 implementations. + * + * @param mutableRequest Marshalled HTTP request we are modifying. + * @param bucketName Bucket name for this particular operation. + */ + public static void changeToDnsEndpoint(SdkHttpRequest.Builder mutableRequest, String bucketName) { + if (mutableRequest.host().startsWith("s3")) { + String newHost = mutableRequest.host().replaceFirst("s3", bucketName + "." + "s3"); + String newPath = mutableRequest.encodedPath().replaceFirst("/" + bucketName, ""); + + mutableRequest.host(newHost).encodedPath(newPath); + } + } + + public static boolean isArn(String s) { + return s.startsWith("arn:"); + } + + private static URI toUri(String protocol, String endpoint) { + try { + return new URI(String.format("%s://%s", protocol, endpoint)); + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AddContentMd5HeaderInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AddContentMd5HeaderInterceptor.java deleted file mode 100644 index 8d2510e329de..000000000000 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/AddContentMd5HeaderInterceptor.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.s3.internal.handlers; - -import static software.amazon.awssdk.http.Header.CONTENT_MD5; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttribute; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; -import software.amazon.awssdk.utils.IoUtils; -import software.amazon.awssdk.utils.Md5Utils; - -@SdkInternalApi -public class AddContentMd5HeaderInterceptor implements ExecutionInterceptor { - - private static final ExecutionAttribute CONTENT_MD5_ATTRIBUTE = new ExecutionAttribute<>("contentMd5"); - - // List of operations that should be ignored by this interceptor. - // These are costly operations, so adding the md5 header will take a performance hit - private static final List BLACKLIST_METHODS = Arrays.asList(PutObjectRequest.class, UploadPartRequest.class); - - @Override - public Optional modifyHttpContent(Context.ModifyHttpRequest context, - ExecutionAttributes executionAttributes) { - - if (!BLACKLIST_METHODS.contains(context.request().getClass()) && context.requestBody().isPresent() - && !context.httpRequest().firstMatchingHeader(CONTENT_MD5).isPresent()) { - - try { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - IoUtils.copy(context.requestBody().get().contentStreamProvider().newStream(), baos); - executionAttributes.putAttribute(CONTENT_MD5_ATTRIBUTE, Md5Utils.md5AsBase64(baos.toByteArray())); - return context.requestBody(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - return context.requestBody(); - } - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, - ExecutionAttributes executionAttributes) { - String contentMd5 = executionAttributes.getAttribute(CONTENT_MD5_ATTRIBUTE); - - if (contentMd5 != null) { - return context.httpRequest().toBuilder().putHeader(CONTENT_MD5, contentMd5).build(); - } - - return context.httpRequest(); - } -} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java index 0989ed977c37..3f6c4c9eece4 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptor.java @@ -25,7 +25,8 @@ import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; -import software.amazon.awssdk.services.s3.internal.S3EndpointUtils; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverContext; +import software.amazon.awssdk.services.s3.internal.endpoints.S3EndpointResolverFactory; @SdkInternalApi public final class 
EndpointAddressInterceptor implements ExecutionInterceptor { @@ -33,19 +34,31 @@ public final class EndpointAddressInterceptor implements ExecutionInterceptor { @Override public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - ConfiguredS3SdkHttpRequest configuredRequest = - S3EndpointUtils.applyEndpointConfiguration( - context.httpRequest(), - context.request(), - executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION), - (S3Configuration) executionAttributes.getAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG), - Boolean.TRUE.equals(executionAttributes.getAttribute(SdkExecutionAttribute.ENDPOINT_OVERRIDDEN))); + + boolean endpointOverride = + Boolean.TRUE.equals(executionAttributes.getAttribute(SdkExecutionAttribute.ENDPOINT_OVERRIDDEN)); + S3Configuration serviceConfiguration = + (S3Configuration) executionAttributes.getAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG); + S3EndpointResolverContext resolverContext = + S3EndpointResolverContext.builder() + .request(context.httpRequest()) + .originalRequest(context.request()) + .region(executionAttributes.getAttribute(AwsExecutionAttribute.AWS_REGION)) + .endpointOverridden(endpointOverride) + .serviceConfiguration(serviceConfiguration) + .build(); + + String bucketName = context.request().getValueForField("Bucket", String.class).orElse(null); + ConfiguredS3SdkHttpRequest configuredRequest = S3EndpointResolverFactory.getEndpointResolver(bucketName) + .applyEndpointConfiguration(resolverContext); configuredRequest.signingRegionModification().ifPresent( region -> executionAttributes.putAttribute(AwsSignerExecutionAttribute.SIGNING_REGION, region)); configuredRequest.signingServiceModification().ifPresent( name -> executionAttributes.putAttribute(AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME, name)); + return configuredRequest.sdkHttpRequest(); } + } diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index f4d01e982985..1dbcb9f7954b 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

  • x-amz-copy-source-if-match

  • x-amz-copy-source-if-none-match

  • x-amz-copy-source-if-unmodified-since

  • x-amz-copy-source-if-modified-since

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

  • x-amz-copy-source-if-match condition evaluates to true

  • x-amz-copy-source-if-unmodified-since condition evaluates to false

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

  • x-amz-copy-source-if-none-match condition evaluates to false

  • x-amz-copy-source-if-modified-since condition evaluates to true

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Encryption

The source object that you are copying can be encrypted or unencrypted. The source object can be encrypted with server-side encryption using AWS managed encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it.

You can optionally use the appropriate encryption-related headers to request server-side encryption for the target object. You have the option to provide your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form of server-side encryption that was used to encrypt the source object. You can even request encryption if the source object was not encrypted. For more information about server-side encryption, see Using Server-Side Encryption.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

You can use the CopyObject operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

The following operations are related to CopyObject:

For more information, see Copying Objects.

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You can create a copy of an object of up to 5 GB in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-copy-source-if Headers

To copy an object only under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:

  • x-amz-copy-source-if-match

  • x-amz-copy-source-if-none-match

  • x-amz-copy-source-if-unmodified-since

  • x-amz-copy-source-if-modified-since

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

  • x-amz-copy-source-if-match condition evaluates to true

  • x-amz-copy-source-if-unmodified-since condition evaluates to false

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

  • x-amz-copy-source-if-none-match condition evaluates to false

  • x-amz-copy-source-if-modified-since condition evaluates to true

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Server-side encryption

When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.

If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

You can use the CopyObject operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

The following operations are related to CopyObject:

For more information, see Copying Objects.

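As a rough illustration of the CopyObject behavior described above, here is a minimal sketch using the SDK's synchronous S3 client to copy an object and change its storage class. The bucket names and keys are placeholders, not values taken from this change.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
    import software.amazon.awssdk.services.s3.model.CopyObjectResponse;
    import software.amazon.awssdk.services.s3.model.StorageClass;

    public class CopyObjectSketch {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            // copySource combines the (URL-encoded) source bucket and key;
            // bucket() and key() identify the destination. All names are placeholders.
            CopyObjectRequest request = CopyObjectRequest.builder()
                    .copySource("source-bucket/photos/2006/February/sample.jpg")
                    .bucket("destination-bucket")
                    .key("photos/2006/February/sample.jpg")
                    .storageClass(StorageClass.STANDARD_IA) // optionally change the storage class
                    .build();

            CopyObjectResponse response = s3.copyObject(request);
            System.out.println(response.copyObjectResult().eTag());
        }
    }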
", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -124,6 +124,16 @@ "input":{"shape":"DeleteBucketEncryptionRequest"}, "documentation":"

This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

" }, + "DeleteBucketIntelligentTieringConfiguration":{ + "name":"DeleteBucketIntelligentTieringConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?intelligent-tiering", + "responseCode":204 + }, + "input":{"shape":"DeleteBucketIntelligentTieringConfigurationRequest"}, + "documentation":"

Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to DeleteBucketIntelligentTieringConfiguration include:

" + }, "DeleteBucketInventoryConfiguration":{ "name":"DeleteBucketInventoryConfiguration", "http":{ @@ -306,6 +316,16 @@ "output":{"shape":"GetBucketEncryptionOutput"}, "documentation":"

Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to GetBucketEncryption:

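For orientation only, a small sketch of reading a bucket's default encryption configuration with the synchronous client; the bucket name is a placeholder.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetBucketEncryptionRequest;
    import software.amazon.awssdk.services.s3.model.GetBucketEncryptionResponse;

    public class GetBucketEncryptionSketch {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            // Requires the s3:GetEncryptionConfiguration permission described above.
            GetBucketEncryptionResponse encryption = s3.getBucketEncryption(
                    GetBucketEncryptionRequest.builder().bucket("examplebucket").build());

            encryption.serverSideEncryptionConfiguration().rules().forEach(rule ->
                    System.out.println(rule.applyServerSideEncryptionByDefault().sseAlgorithmAsString()));
        }
    }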
" }, + "GetBucketIntelligentTieringConfiguration":{ + "name":"GetBucketIntelligentTieringConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"GetBucketIntelligentTieringConfigurationRequest"}, + "output":{"shape":"GetBucketIntelligentTieringConfigurationOutput"}, + "documentation":"

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to GetBucketIntelligentTieringConfiguration include:

" + }, "GetBucketInventoryConfiguration":{ "name":"GetBucketInventoryConfiguration", "http":{ @@ -486,10 +506,11 @@ "input":{"shape":"GetObjectRequest"}, "output":{"shape":"GetObjectOutput"}, "errors":[ - {"shape":"NoSuchKey"} + {"shape":"NoSuchKey"}, + {"shape":"InvalidObjectState"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", - "documentation":"

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE storage classes, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

  • x-amz-server-side-encryption-customer-algorithm

  • x-amz-server-side-encryption-customer-key

  • x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

  • If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.

  • If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.

Versioning

By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

  • response-content-type

  • response-content-language

  • response-expires

  • response-cache-control

  • response-content-disposition

  • response-content-encoding

Additional Considerations about Request Headers

If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

The following operations are related to GetObject:

" + "documentation":"

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

  • x-amz-server-side-encryption-customer-algorithm

  • x-amz-server-side-encryption-customer-key

  • x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header, which provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

  • If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.

  • If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.

Versioning

By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

  • response-content-type

  • response-content-language

  • response-expires

  • response-cache-control

  • response-content-disposition

  • response-content-encoding

Additional Considerations about Request Headers

If both the If-Match and If-Unmodified-Since headers are present in the request, the If-Match condition evaluates to true, and the If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK and the requested data.

If both the If-None-Match and If-Modified-Since headers are present in the request, the If-None-Match condition evaluates to false, and the If-Modified-Since condition evaluates to true, then S3 returns a 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

The following operations are related to GetObject:

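A minimal sketch of the response-header overrides described above, using the synchronous client; the bucket, key, and header values are placeholders. The response* builder methods correspond to the response-* query parameters, so the request must be signed.

    import software.amazon.awssdk.core.ResponseInputStream;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GetObjectRequest;
    import software.amazon.awssdk.services.s3.model.GetObjectResponse;

    public class GetObjectSketch {
        public static void main(String[] args) throws Exception {
            S3Client s3 = S3Client.create();

            GetObjectRequest request = GetObjectRequest.builder()
                    .bucket("examplebucket")                 // placeholder bucket
                    .key("photos/2006/February/sample.jpg")  // placeholder key
                    .responseContentType("image/jpeg")       // overrides Content-Type in the response
                    .responseContentDisposition("attachment; filename=\"sample.jpg\"")
                    .build();

            try (ResponseInputStream<GetObjectResponse> object = s3.getObject(request)) {
                System.out.println(object.response().contentType());
            }
        }
    }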
" }, "GetObjectAcl":{ "name":"GetObjectAcl", @@ -603,6 +624,16 @@ "output":{"shape":"ListBucketAnalyticsConfigurationsOutput"}, "documentation":"

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to ListBucketAnalyticsConfigurations:

" }, + "ListBucketIntelligentTieringConfigurations":{ + "name":"ListBucketIntelligentTieringConfigurations", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"ListBucketIntelligentTieringConfigurationsRequest"}, + "output":{"shape":"ListBucketIntelligentTieringConfigurationsOutput"}, + "documentation":"

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to ListBucketIntelligentTieringConfigurations include:

" + }, "ListBucketInventoryConfigurations":{ "name":"ListBucketInventoryConfigurations", "http":{ @@ -743,9 +774,18 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

This implementation of the PUT operation uses the encryption subresource to set the default encryption state of an existing bucket.

This implementation of the PUT operation sets default encryption for a bucket using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "documentation":"

This operation uses the encryption subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service Developer Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

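A minimal sketch of configuring SSE-S3 default encryption as described above; the bucket name is a placeholder. For SSE-KMS, you would instead use ServerSideEncryption.AWS_KMS and set a kmsMasterKeyID on ServerSideEncryptionByDefault.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionByDefault;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionConfiguration;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

    public class PutBucketEncryptionSketch {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            // Default encryption with Amazon S3-managed keys (SSE-S3).
            ServerSideEncryptionRule rule = ServerSideEncryptionRule.builder()
                    .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                            .sseAlgorithm(ServerSideEncryption.AES256)
                            .build())
                    .build();

            s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                    .bucket("examplebucket") // placeholder bucket
                    .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
                            .rules(rule)
                            .build())
                    .build());
        }
    }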
", "httpChecksumRequired":true }, + "PutBucketIntelligentTieringConfiguration":{ + "name":"PutBucketIntelligentTieringConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"PutBucketIntelligentTieringConfigurationRequest"}, + "documentation":"

Puts an S3 Intelligent-Tiering configuration to the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to PutBucketIntelligentTieringConfiguration include:

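A sketch of putting an Intelligent-Tiering configuration, assuming the generated model classes (IntelligentTieringConfiguration, Tiering, IntelligentTieringAccessTier) follow the shapes introduced by this change; the configuration id and bucket name are placeholders.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringAccessTier;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringConfiguration;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringStatus;
    import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.Tiering;

    public class IntelligentTieringSketch {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            // Move objects to the Archive Access tier after 90 days without access.
            IntelligentTieringConfiguration configuration = IntelligentTieringConfiguration.builder()
                    .id("ExampleConfig") // placeholder id
                    .status(IntelligentTieringStatus.ENABLED)
                    .tierings(Tiering.builder()
                            .accessTier(IntelligentTieringAccessTier.ARCHIVE_ACCESS)
                            .days(90)
                            .build())
                    .build();

            s3.putBucketIntelligentTieringConfiguration(PutBucketIntelligentTieringConfigurationRequest.builder()
                    .bucket("examplebucket") // placeholder bucket
                    .id("ExampleConfig")
                    .intelligentTieringConfiguration(configuration)
                    .build());
        }
    }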
" + }, "PutBucketInventoryConfiguration":{ "name":"PutBucketInventoryConfiguration", "http":{ @@ -825,7 +865,8 @@ "requestUri":"/{Bucket}?ownershipControls" }, "input":{"shape":"PutBucketOwnershipControlsRequest"}, - "documentation":"

Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

For information about Amazon S3 Object Ownership, see Using Object Ownership.

The following operations are related to GetBucketOwnershipControls:

" + "documentation":"

Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

For information about Amazon S3 Object Ownership, see Using Object Ownership.

The following operations are related to PutBucketOwnershipControls:

", + "httpChecksumRequired":true }, "PutBucketPolicy":{ "name":"PutBucketPolicy", @@ -845,7 +886,7 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

The latest version of the replication configuration XML is V2. XML V2 replication configurations are those that contain the Filter element for rules, and rules that specify S3 Replication Time Control (S3 RTC). In XML V2 replication configurations, Amazon S3 doesn't replicate delete markers. Therefore, you must set the DeleteMarkerReplication element to Disabled. For backward compatibility, Amazon S3 continues to support the XML V1 replication configuration.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see List of replication-related error codes

The following operations are related to PutBucketReplication:

", + "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information about PutBucketReplication errors, see the List of replication-related error codes.
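As a rough illustration of the request body described above, the following sketch builds an XML V2 rule (Filter, Priority, and DeleteMarkerReplication all present) with the AWS SDK for Java v2. The bucket names, rule ID, and IAM role ARN are placeholders, and the sketch assumes an SDK release generated from a model that includes these replication shapes; the Content-MD5 header required by this operation is computed by the SDK automatically.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.DeleteMarkerReplication;
import software.amazon.awssdk.services.s3.model.DeleteMarkerReplicationStatus;
import software.amazon.awssdk.services.s3.model.Destination;
import software.amazon.awssdk.services.s3.model.PutBucketReplicationRequest;
import software.amazon.awssdk.services.s3.model.ReplicationConfiguration;
import software.amazon.awssdk.services.s3.model.ReplicationRule;
import software.amazon.awssdk.services.s3.model.ReplicationRuleFilter;
import software.amazon.awssdk.services.s3.model.ReplicationRuleStatus;

public class PutBucketReplicationSketch {
    public static void main(String[] args) {
        // XML V2 rule: Filter, Priority, and DeleteMarkerReplication are all present.
        ReplicationRule rule = ReplicationRule.builder()
                .id("replicate-tax-docs")                                   // placeholder rule ID
                .priority(1)
                .status(ReplicationRuleStatus.ENABLED)
                .filter(ReplicationRuleFilter.builder().prefix("TaxDocs").build())
                .deleteMarkerReplication(DeleteMarkerReplication.builder()
                        .status(DeleteMarkerReplicationStatus.DISABLED)     // delete markers are not replicated
                        .build())
                .destination(Destination.builder()
                        .bucket("arn:aws:s3:::destination-bucket")          // placeholder destination bucket ARN
                        .build())
                .build();

        try (S3Client s3 = S3Client.create()) {
            s3.putBucketReplication(PutBucketReplicationRequest.builder()
                    .bucket("source-bucket")                                // placeholder source bucket
                    .replicationConfiguration(ReplicationConfiguration.builder()
                            .role("arn:aws:iam::123456789012:role/replication-role") // placeholder IAM role
                            .rules(rule)
                            .build())
                    .build());
        }
    }
}
```

The principal submitting this request also needs iam:PassRole for the replication role, as noted above.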

The following operations are related to PutBucketReplication:

", "httpChecksumRequired":true }, "PutBucketRequestPayment":{ @@ -901,7 +942,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

Server-side Encryption

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side Encryption.

Access Control List (ACL)-Specific Request Headers

You can use headers to grant ACL- based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

Related Resources

" + "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

Server-side Encryption

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.

If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
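A minimal sketch of the upload described above using the AWS SDK for Java v2: it computes the Content-MD5 value client-side and requests SSE-KMS with an S3 Bucket Key. The bucket, key, and KMS key alias are placeholders, and the bucketKeyEnabled setter assumes an SDK release that includes the BucketKeyEnabled member introduced in this model.

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

public class PutObjectSketch {
    public static void main(String[] args) throws Exception {
        byte[] body = "hello world".getBytes(StandardCharsets.UTF_8);

        // Base64-encoded 128-bit MD5 digest of the payload, sent as Content-MD5 so that
        // Amazon S3 can verify the request body was not corrupted in transit.
        String contentMd5 = Base64.getEncoder()
                .encodeToString(MessageDigest.getInstance("MD5").digest(body));

        try (S3Client s3 = S3Client.create()) {
            s3.putObject(PutObjectRequest.builder()
                            .bucket("example-bucket")                 // placeholder bucket
                            .key("example/key.txt")                   // placeholder key
                            .contentMD5(contentMd5)
                            .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                            .ssekmsKeyId("alias/example-key")         // placeholder KMS key alias
                            .bucketKeyEnabled(true)                   // request an S3 Bucket Key for SSE-KMS
                            .build(),
                    RequestBody.fromBytes(body));
        }
    }
}
```

Note that for SSE-KMS uploads the returned ETag is not an MD5 digest of the object data, so in this sketch the integrity check is carried by the Content-MD5 header rather than by an ETag comparison.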

Access Control List (ACL)-Specific Request Headers

You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

Related Resources

" }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -984,7 +1025,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"

Restores an archived copy of an object back into Amazon S3

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

  • select - Perform a select query on an archived object

  • restore an archive - Restore an archived object

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

  • Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

    For more information about the S3 structure in the request body, see the following:

  • Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

    • The following expression returns all records from the specified object.

      SELECT * FROM Object

    • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

      SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

      SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

  • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

  • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

The following are additional important facts about the select feature:

  • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted-manually or through a lifecycle policy.

  • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.

  • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

Restoring Archives

Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To access an archived object, you must first initiate a restore request. This restores a temporary copy of the archived object. In a restore request, you specify the number of days that you want the restored copy to exist. After the specified period, Amazon S3 deletes the temporary copy but the object remains archived in the GLACIER or DEEP_ARCHIVE storage class that object was restored from.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

The time it takes restore jobs to finish depends on which storage class the object is being restored from and which data access tier you specify.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

  • Expedited - Expedited retrievals allow you to quickly access your data stored in the GLACIER storage class when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals are typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for the DEEP_ARCHIVE storage class.

  • Standard - S3 Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for the GLACIER and DEEP_ARCHIVE retrieval requests that do not specify the retrieval option. S3 Standard retrievals typically complete within 3-5 hours from the GLACIER storage class and typically complete within 12 hours from the DEEP_ARCHIVE storage class.

  • Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval option, enabling you to retrieve large amounts, even petabytes, of data inexpensively in a day. Bulk retrievals typically complete within 5-12 hours from the GLACIER storage class and typically complete within 48 hours from the DEEP_ARCHIVE storage class.

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. You upgrade the speed of an in-progress restoration by issuing another restore request to the same object, setting a new Tier request element. When issuing a request to upgrade the restore tier, you must choose a tier that is faster than the tier that the in-progress restore is using. You must not change any other parameters, such as the Days request element. For more information, see Upgrading the Speed of an In-Progress Restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

  • If the object copy is not previously restored, then Amazon S3 returns 202 Accepted in the response.

  • If the object copy is previously restored, Amazon S3 returns 200 OK in the response.

Special Errors

    • Code: RestoreAlreadyInProgress

    • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

    • HTTP Status Code: 409 Conflict

    • SOAP Fault Code Prefix: Client

    • Code: GlacierExpeditedRetrievalNotAvailable

    • Cause: S3 Glacier expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

    • HTTP Status Code: 503

    • SOAP Fault Code Prefix: N/A

Related Resources

", + "documentation":"

Restores an archived copy of an object back into Amazon S3.

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

  • select - Perform a select query on an archived object

  • restore an archive - Restore an archived object

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

  • Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

    For more information about the S3 structure in the request body, see the following:

  • Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

    • The following expression returns all records from the specified object.

      SELECT * FROM Object

    • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

      SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

      SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

  • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

  • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

The following are additional important facts about the select feature:

  • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle policy.

  • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.

  • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

Restoring objects

Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, or to the S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tier, are not accessible in real time. For objects in the Archive Access or Deep Archive Access tier, you must first initiate a restore request and then wait until the object is moved into the Frequent Access tier. For objects in the S3 Glacier or S3 Glacier Deep Archive storage class, you must first initiate a restore request and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

  • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

  • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

  • Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.
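A hedged sketch of initiating the archive restore described above with the AWS SDK for Java v2; the bucket and key are placeholders, and the restore keeps the temporary copy for 10 days at the Standard tier. A select-type restore would additionally set the type, select parameters, and output location on the same RestoreRequest (not shown here).

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GlacierJobParameters;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.RestoreObjectRequest;
import software.amazon.awssdk.services.s3.model.RestoreRequest;
import software.amazon.awssdk.services.s3.model.Tier;

public class RestoreObjectSketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            // Keep the temporary copy for 10 days and process the restore at the Standard tier.
            s3.restoreObject(RestoreObjectRequest.builder()
                    .bucket("example-bucket")                 // placeholder bucket
                    .key("archived/report.csv")               // placeholder archived object key
                    .restoreRequest(RestoreRequest.builder()
                            .days(10)
                            .glacierJobParameters(GlacierJobParameters.builder()
                                    .tier(Tier.STANDARD)
                                    .build())
                            .build())
                    .build());

            // Later, poll the x-amz-restore header to see whether the restoration has finished.
            String restore = s3.headObject(HeadObjectRequest.builder()
                    .bucket("example-bucket")
                    .key("archived/report.csv")
                    .build()).restore();
            System.out.println("x-amz-restore: " + restore);
        }
    }
}
```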

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

  • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

  • If the object is previously restored, Amazon S3 returns 200 OK in the response.

Special Errors

    • Code: RestoreAlreadyInProgress

    • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

    • HTTP Status Code: 409 Conflict

    • SOAP Fault Code Prefix: Client

    • Code: GlacierExpeditedRetrievalNotAvailable

    • Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

    • HTTP Status Code: 503

    • SOAP Fault Code Prefix: N/A

Related Resources

", "alias":"PostObjectRestore" }, "SelectObjectContent":{ @@ -1246,6 +1287,13 @@ "type":"string", "enum":["CSV"] }, + "ArchiveStatus":{ + "type":"string", + "enum":[ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS" + ] + }, "Body":{"type":"blob"}, "Bucket":{ "type":"structure", @@ -1256,7 +1304,7 @@ }, "CreationDate":{ "shape":"CreationDate", - "documentation":"

Date the bucket was created.

" + "documentation":"

Date the bucket was created. This date can change when making changes to your bucket, such as editing its bucket policy.

" } }, "documentation":"

In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally unique, and the namespace is shared by all AWS accounts.

" @@ -1291,6 +1339,7 @@ "authenticated-read" ] }, + "BucketKeyEnabled":{"type":"boolean"}, "BucketLifecycleConfiguration":{ "type":"structure", "required":["Rules"], @@ -1565,6 +1614,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -1740,6 +1795,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -1930,6 +1991,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a COPY operation doesn’t affect bucket-level settings for S3 Bucket Key.
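For example, a copy request might opt into an S3 Bucket Key on the destination object, as in the following AWS SDK for Java v2 sketch; the bucket names and keys are placeholders, and the bucketKeyEnabled setter assumes an SDK release that includes this member.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

public class CopyWithBucketKeySketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            s3.copyObject(CopyObjectRequest.builder()
                    .copySource("source-bucket/reports/2020.csv")    // placeholder "bucket/key" source
                    .bucket("destination-bucket")                    // placeholder destination bucket
                    .key("reports/2020.csv")                         // placeholder destination key
                    .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                    .bucketKeyEnabled(true)                          // per-object S3 Bucket Key for the copy
                    .build());
        }
    }
}
```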

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "CopySourceSSECustomerAlgorithm":{ "shape":"CopySourceSSECustomerAlgorithm", "documentation":"

Specifies the algorithm to use when decrypting the source object (for example, AES256).

", @@ -2175,6 +2242,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -2321,6 +2394,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with an object operation doesn’t affect bucket-level settings for S3 Bucket Key.

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestPayer":{ "shape":"RequestPayer", "location":"header", @@ -2462,6 +2541,27 @@ } } }, + "DeleteBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "documentation":"

The ID used to identify the S3 Intelligent-Tiering configuration.

", + "location":"querystring", + "locationName":"id" + } + } + }, "DeleteBucketInventoryConfigurationRequest":{ "type":"structure", "required":[ @@ -2546,6 +2646,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -2673,10 +2774,10 @@ "members":{ "Status":{ "shape":"DeleteMarkerReplicationStatus", - "documentation":"

Indicates whether to replicate delete markers.

In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled.

" + "documentation":"

Indicates whether to replicate delete markers.

" } }, - "documentation":"

Specifies whether Amazon S3 replicates the delete markers. If you specify a Filter, you must specify this element. However, in the latest version of replication configuration (when Filter is specified), Amazon S3 doesn't replicate delete markers. Therefore, the DeleteMarkerReplication element can contain only <Status>Disabled</Status>. For an example configuration, see Basic Rule Configuration.

If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, Amazon S3 handled replication of delete markers differently. For more information, see Backward Compatibility.

" + "documentation":"

Specifies whether Amazon S3 replicates delete markers. If you specify a Filter in your replication configuration, you must also include a DeleteMarkerReplication element. If your Filter includes a Tag element, the DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does not support replicating delete markers for tag-based rules. For an example configuration, see Basic Rule Configuration.

For more information about delete marker replication, see Basic Rule Configuration.

If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

" }, "DeleteMarkerReplicationStatus":{ "type":"string", @@ -2948,7 +3049,7 @@ }, "Metrics":{ "shape":"Metrics", - "documentation":"

A container specifying replication metrics-related settings enabling metrics and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified together with a ReplicationTime block.

" + "documentation":"

A container specifying replication metrics-related settings that enable replication metrics and events.

" } }, "documentation":"

Specifies information about where to publish analysis or configuration results for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).

" @@ -3291,6 +3392,37 @@ } } }, + "GetBucketIntelligentTieringConfigurationOutput":{ + "type":"structure", + "members":{ + "IntelligentTieringConfiguration":{ + "shape":"IntelligentTieringConfiguration", + "documentation":"

Container for S3 Intelligent-Tiering configuration.

" + } + }, + "payload":"IntelligentTieringConfiguration" + }, + "GetBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "documentation":"

The ID used to identify the S3 Intelligent-Tiering configuration.

", + "location":"querystring", + "locationName":"id" + } + } + }, "GetBucketInventoryConfigurationOutput":{ "type":"structure", "members":{ @@ -3512,6 +3644,7 @@ }, "ExpectedBucketOwner":{ "shape":"AccountId", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" } @@ -3997,6 +4130,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "StorageClass":{ "shape":"StorageClass", "documentation":"

Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

", @@ -4354,7 +4493,7 @@ "members":{ "Tier":{ "shape":"Tier", - "documentation":"

S3 Glacier retrieval tier at which the restore will be processed.

" + "documentation":"

Retrieval tier at which the restore will be processed.

" } }, "documentation":"

Container for S3 Glacier job parameters.

" @@ -4463,6 +4602,12 @@ "location":"header", "locationName":"x-amz-restore" }, + "ArchiveStatus":{ + "shape":"ArchiveStatus", + "documentation":"

The archive state of the head object.

", + "location":"header", + "locationName":"x-amz-archive-status" + }, "LastModified":{ "shape":"LastModified", "documentation":"

Last modified date of the object

", @@ -4565,6 +4710,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "StorageClass":{ "shape":"StorageClass", "documentation":"

Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

For more information, see Storage Classes.

", @@ -4578,7 +4729,7 @@ }, "ReplicationStatus":{ "shape":"ReplicationStatus", - "documentation":"

Amazon S3 can return this header if your request involves a bucket that is either a source or destination in a replication rule.

In replication, you have a source bucket on which you configure replication and destination bucket where Amazon S3 stores object replicas. When you request an object (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will return the x-amz-replication-status header in the response as follows:

  • If requesting an object from the source bucket — Amazon S3 will return the x-amz-replication-status header if the object in your request is eligible for replication.

    For example, suppose that in your replication configuration, you specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix TaxDocs. Any objects you upload with this key name prefix, for example TaxDocs/document1.pdf, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status header with value PENDING, COMPLETED or FAILED indicating object replication status.

  • If requesting an object from the destination bucket — Amazon S3 will return the x-amz-replication-status header with value REPLICA if the object in your request is a replica that Amazon S3 created.

For more information, see Replication.

", + "documentation":"

Amazon S3 can return this header if your request involves a bucket that is either a source or a destination in a replication rule.

In replication, you have a source bucket on which you configure replication and destination bucket or buckets where Amazon S3 stores object replicas. When you request an object (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will return the x-amz-replication-status header in the response as follows:

  • If requesting an object from the source bucket — Amazon S3 will return the x-amz-replication-status header if the object in your request is eligible for replication.

    For example, suppose that in your replication configuration, you specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix TaxDocs. Any objects you upload with this key name prefix, for example TaxDocs/document1.pdf, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status header with value PENDING, COMPLETED or FAILED indicating object replication status.

  • If requesting an object from a destination bucket — Amazon S3 will return the x-amz-replication-status header with value REPLICA if the object in your request is a replica that Amazon S3 created and there is no replica modification replication in progress.

  • When replicating objects to multiple destination buckets the x-amz-replication-status header acts differently. The header of the source object will only return a value of COMPLETED when replication is successful to all destinations. The header will remain at value PENDING until replication has completed for all destinations. If one or more destinations fails replication the header will return FAILED.

For more information, see Replication.
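A small sketch of reading this header with the AWS SDK for Java v2; the bucket and key are placeholders chosen to match the TaxDocs example above.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

public class ReplicationStatusSketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            HeadObjectResponse head = s3.headObject(HeadObjectRequest.builder()
                    .bucket("source-bucket")             // placeholder source bucket
                    .key("TaxDocs/document1.pdf")        // key prefix covered by a replication rule
                    .build());

            // Null if the object is not covered by any replication rule; otherwise one of the
            // states described above (pending, failed, completed, or replica).
            System.out.println("x-amz-replication-status: " + head.replicationStatus());
        }
    }
}
```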

", "location":"header", "locationName":"x-amz-replication-status" }, @@ -4756,6 +4907,95 @@ }, "documentation":"

Describes the serialization format of the object.

" }, + "IntelligentTieringAccessTier":{ + "type":"string", + "enum":[ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS" + ] + }, + "IntelligentTieringAndOperator":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

An object key name prefix that identifies the subset of objects to which the configuration applies.

" + }, + "Tags":{ + "shape":"TagSet", + "documentation":"

All of these tags must exist in the object's tag set in order for the configuration to apply.

", + "flattened":true, + "locationName":"Tag" + } + }, + "documentation":"

A container for specifying S3 Intelligent-Tiering filters. The filters determine the subset of objects to which the rule applies.

" + }, + "IntelligentTieringConfiguration":{ + "type":"structure", + "required":[ + "Id", + "Status", + "Tierings" + ], + "members":{ + "Id":{ + "shape":"IntelligentTieringId", + "documentation":"

The ID used to identify the S3 Intelligent-Tiering configuration.

" + }, + "Filter":{ + "shape":"IntelligentTieringFilter", + "documentation":"

Specifies a bucket filter. The configuration only includes objects that meet the filter's criteria.

" + }, + "Status":{ + "shape":"IntelligentTieringStatus", + "documentation":"

Specifies the status of the configuration.

" + }, + "Tierings":{ + "shape":"TieringList", + "documentation":"

Specifies the S3 Intelligent-Tiering storage class tier of the configuration.

", + "locationName":"Tiering" + } + }, + "documentation":"

Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket.

For information about the S3 Intelligent-Tiering storage class, see Storage class for automatically optimizing frequently and infrequently accessed objects.
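A sketch of building such a configuration with the AWS SDK for Java v2. The ID, prefix, and 90-day threshold are placeholders, and the Tiering builder methods (days, accessTier) are assumed from the TieringList and IntelligentTieringDays shapes referenced in this model.

```java
import software.amazon.awssdk.services.s3.model.IntelligentTieringAccessTier;
import software.amazon.awssdk.services.s3.model.IntelligentTieringConfiguration;
import software.amazon.awssdk.services.s3.model.IntelligentTieringFilter;
import software.amazon.awssdk.services.s3.model.IntelligentTieringStatus;
import software.amazon.awssdk.services.s3.model.Tiering;

public class IntelligentTieringConfigSketch {
    // Move objects under logs/ into the Archive Access tier after 90 days without access.
    public static IntelligentTieringConfiguration archiveAfter90Days() {
        return IntelligentTieringConfiguration.builder()
                .id("archive-logs")                                      // placeholder configuration ID
                .status(IntelligentTieringStatus.ENABLED)
                .filter(IntelligentTieringFilter.builder()
                        .prefix("logs/")                                 // only objects under this prefix
                        .build())
                .tierings(Tiering.builder()                              // assumed Tiering members: days, accessTier
                        .days(90)
                        .accessTier(IntelligentTieringAccessTier.ARCHIVE_ACCESS)
                        .build())
                .build();
    }
}
```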

" + }, + "IntelligentTieringConfigurationList":{ + "type":"list", + "member":{"shape":"IntelligentTieringConfiguration"}, + "flattened":true + }, + "IntelligentTieringDays":{"type":"integer"}, + "IntelligentTieringFilter":{ + "type":"structure", + "members":{ + "Prefix":{ + "shape":"Prefix", + "documentation":"

An object key name prefix that identifies the subset of objects to which the rule applies.

" + }, + "Tag":{"shape":"Tag"}, + "And":{ + "shape":"IntelligentTieringAndOperator", + "documentation":"

A conjunction (logical AND) of predicates, which is used in evaluating an S3 Intelligent-Tiering filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

" + } + }, + "documentation":"

The Filter is used to identify objects that the S3 Intelligent-Tiering configuration applies to.

" + }, + "IntelligentTieringId":{"type":"string"}, + "IntelligentTieringStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "InvalidObjectState":{ + "type":"structure", + "members":{ + "StorageClass":{"shape":"StorageClass"}, + "AccessTier":{"shape":"IntelligentTieringAccessTier"} + }, + "documentation":"

Object is archived and inaccessible until restored.

", + "exception":true + }, "InventoryConfiguration":{ "type":"structure", "required":[ @@ -5140,6 +5380,46 @@ } } }, + "ListBucketIntelligentTieringConfigurationsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{ + "shape":"IsTruncated", + "documentation":"

Indicates whether the returned list of S3 Intelligent-Tiering configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

" + }, + "ContinuationToken":{ + "shape":"Token", + "documentation":"

The ContinuationToken that represents a placeholder from where this request should begin.

" + }, + "NextContinuationToken":{ + "shape":"NextToken", + "documentation":"

The marker used to continue this S3 Intelligent-Tiering configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.

" + }, + "IntelligentTieringConfigurationList":{ + "shape":"IntelligentTieringConfigurationList", + "documentation":"

The list of S3 Intelligent-Tiering configurations for a bucket.

", + "locationName":"IntelligentTieringConfiguration" + } + } + }, + "ListBucketIntelligentTieringConfigurationsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", + "location":"uri", + "locationName":"Bucket" + }, + "ContinuationToken":{ + "shape":"Token", + "documentation":"

The ContinuationToken that represents a placeholder from where this request should begin.

", + "location":"querystring", + "locationName":"continuation-token" + } + } + }, "ListBucketInventoryConfigurationsOutput":{ "type":"structure", "members":{ @@ -5874,10 +6154,7 @@ "MetadataValue":{"type":"string"}, "Metrics":{ "type":"structure", - "required":[ - "Status", - "EventThreshold" - ], + "required":["Status"], "members":{ "Status":{ "shape":"MetricsStatus", @@ -5888,7 +6165,7 @@ "documentation":"

A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold event.

" } }, - "documentation":"

A container specifying replication metrics-related settings enabling metrics and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified together with a ReplicationTime block.

" + "documentation":"

A container specifying replication metrics-related settings that enable replication metrics and events.
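With EventThreshold no longer required, a Metrics block can enable replication metrics on its own; a minimal sketch follows (the 15-minute threshold in the second variant is a placeholder).

```java
import software.amazon.awssdk.services.s3.model.Metrics;
import software.amazon.awssdk.services.s3.model.MetricsStatus;
import software.amazon.awssdk.services.s3.model.ReplicationTimeValue;

public class ReplicationMetricsSketch {
    // Replication metrics alone: EventThreshold is now optional.
    static Metrics metricsOnly() {
        return Metrics.builder().status(MetricsStatus.ENABLED).build();
    }

    // With S3 RTC, an event threshold (in minutes) can still be supplied.
    static Metrics metricsWithThreshold() {
        return Metrics.builder()
                .status(MetricsStatus.ENABLED)
                .eventThreshold(ReplicationTimeValue.builder().minutes(15).build())
                .build();
    }
}
```

Either variant is attached to a replication rule's Destination through its metrics member.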

" }, "MetricsAndOperator":{ "type":"structure", @@ -6522,7 +6799,7 @@ }, "RestrictPublicBuckets":{ "shape":"Setting", - "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

", + "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
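A sketch of applying this setting with the AWS SDK for Java v2; the bucket name is a placeholder, and the other three public-access-block flags are omitted and therefore left unset.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PublicAccessBlockConfiguration;
import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockRequest;

public class RestrictPublicBucketsSketch {
    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            s3.putPublicAccessBlock(PutPublicAccessBlockRequest.builder()
                    .bucket("example-bucket")                    // placeholder bucket
                    .publicAccessBlockConfiguration(PublicAccessBlockConfiguration.builder()
                            // With a public policy, only AWS service principals and authorized
                            // users within this account retain access.
                            .restrictPublicBuckets(true)
                            .build())
                    .build());
        }
    }
}
```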

", "locationName":"RestrictPublicBuckets" } }, @@ -6580,9 +6857,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -6681,9 +6956,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -6711,9 +6984,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. This parameter is auto-populated when using the command from the CLI.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -6731,6 +7002,35 @@ }, "payload":"ServerSideEncryptionConfiguration" }, + "PutBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id", + "IntelligentTieringConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "documentation":"

The ID used to identify the S3 Intelligent-Tiering configuration.

", + "location":"querystring", + "locationName":"id" + }, + "IntelligentTieringConfiguration":{ + "shape":"IntelligentTieringConfiguration", + "documentation":"

Container for S3 Intelligent-Tiering configuration.
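A hedged sketch of submitting such a configuration with the AWS SDK for Java v2; the bucket name, configuration ID, and tiering values are placeholders, and the operation method name follows the SDK's standard code generation for the request shape shown here.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.IntelligentTieringAccessTier;
import software.amazon.awssdk.services.s3.model.IntelligentTieringConfiguration;
import software.amazon.awssdk.services.s3.model.IntelligentTieringStatus;
import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest;
import software.amazon.awssdk.services.s3.model.Tiering;

public class PutIntelligentTieringSketch {
    public static void main(String[] args) {
        IntelligentTieringConfiguration config = IntelligentTieringConfiguration.builder()
                .id("archive-logs")                                      // placeholder configuration ID
                .status(IntelligentTieringStatus.ENABLED)
                .tierings(Tiering.builder()
                        .days(90)
                        .accessTier(IntelligentTieringAccessTier.ARCHIVE_ACCESS)
                        .build())
                .build();

        try (S3Client s3 = S3Client.create()) {
            s3.putBucketIntelligentTieringConfiguration(
                    PutBucketIntelligentTieringConfigurationRequest.builder()
                            .bucket("example-bucket")                    // placeholder bucket
                            .id(config.id())                             // request ID matches the configuration ID
                            .intelligentTieringConfiguration(config)
                            .build());
        }
    }
}
```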

", + "locationName":"IntelligentTieringConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"IntelligentTieringConfiguration" + }, "PutBucketInventoryConfigurationRequest":{ "type":"structure", "required":[ @@ -6803,9 +7103,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -6845,9 +7143,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash of the PutBucketLogging request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash of the PutBucketLogging request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -6937,9 +7233,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash of the PutPublicAccessBlock request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash of the PutPublicAccessBlock request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -6973,12 +7267,13 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash of the OwnershipControls request body.

", + "documentation":"

The MD5 hash of the OwnershipControls request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, "ExpectedBucketOwner":{ "shape":"AccountId", + "documentation":"

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "location":"header", "locationName":"x-amz-expected-bucket-owner" }, @@ -7006,9 +7301,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash of the request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash of the request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7046,9 +7339,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7059,7 +7350,7 @@ }, "Token":{ "shape":"ObjectLockToken", - "documentation":"

", + "documentation":"

A token to allow Object Lock to be enabled for an existing bucket.

", "location":"header", "locationName":"x-amz-bucket-object-lock-token" }, @@ -7087,9 +7378,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

>The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7123,9 +7412,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7159,9 +7446,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

>The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7201,9 +7486,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7259,9 +7542,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.>

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7369,9 +7650,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash for the request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash for the request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7423,9 +7702,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash for the request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash for the request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7489,6 +7766,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -7652,6 +7935,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-context" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.

Specifying this header with a PUT operation doesn’t affect bucket-level settings for S3 Bucket Key.
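A minimal sketch of how this header surfaces in the generated AWS SDK for Java v2 request builder (assuming the BucketKeyEnabled member generated from this model; bucket, key, and KMS key ID are placeholders):

    import software.amazon.awssdk.core.sync.RequestBody;
    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutObjectRequest;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

    public class BucketKeyPutObjectExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            // Ask S3 to encrypt this object with SSE-KMS and to use an
            // S3 Bucket Key, which reduces the number of calls made to AWS KMS.
            s3.putObject(PutObjectRequest.builder()
                            .bucket("my-bucket")                 // placeholder
                            .key("report.csv")                   // placeholder
                            .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                            .ssekmsKeyId("arn:aws:kms:us-east-1:111122223333:key/placeholder")
                            .bucketKeyEnabled(true)              // maps to the new header
                            .build(),
                    RequestBody.fromString("hello"));
        }
    }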

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestPayer":{ "shape":"RequestPayer", "location":"header", @@ -7744,9 +8033,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash for the request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash for the request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7798,9 +8085,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash for the request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash for the request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7834,9 +8119,7 @@ }, "ContentMD5":{ "shape":"ContentMD5", - "documentation":"

The MD5 hash of the PutPublicAccessBlock request body.

", - "deprecated":true, - "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", + "documentation":"

The MD5 hash of the PutPublicAccessBlock request body.

For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically.

", "location":"header", "locationName":"Content-MD5" }, @@ -7971,6 +8254,24 @@ "ReplaceKeyPrefixWith":{"type":"string"}, "ReplaceKeyWith":{"type":"string"}, "ReplicaKmsKeyID":{"type":"string"}, + "ReplicaModifications":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"ReplicaModificationsStatus", + "documentation":"

Specifies whether Amazon S3 replicates modifications on replicas.

" + } + }, + "documentation":"

A filter that you can specify to select modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when Filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas.

If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, this element is not allowed.
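As a sketch of how this new shape might be expressed with the v2 model classes generated from this change (class and enum names below assume the usual codegen conventions), the filter is attached through SourceSelectionCriteria of a V2, Filter-based rule:

    import software.amazon.awssdk.services.s3.model.ReplicaModifications;
    import software.amazon.awssdk.services.s3.model.ReplicaModificationsStatus;
    import software.amazon.awssdk.services.s3.model.SourceSelectionCriteria;
    import software.amazon.awssdk.services.s3.model.SseKmsEncryptedObjects;
    import software.amazon.awssdk.services.s3.model.SseKmsEncryptedObjectsStatus;

    public class ReplicaModificationsExample {
        public static void main(String[] args) {
            // Only valid in a V2 (Filter-based) replication rule.
            SourceSelectionCriteria criteria = SourceSelectionCriteria.builder()
                    .sseKmsEncryptedObjects(SseKmsEncryptedObjects.builder()
                            .status(SseKmsEncryptedObjectsStatus.ENABLED)
                            .build())
                    .replicaModifications(ReplicaModifications.builder()
                            .status(ReplicaModificationsStatus.ENABLED)
                            .build())
                    .build();
            System.out.println(criteria);
        }
    }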

" + }, + "ReplicaModificationsStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "ReplicationConfiguration":{ "type":"structure", "required":[ @@ -8003,7 +8304,7 @@ }, "Priority":{ "shape":"Priority", - "documentation":"

The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

  • Same object quality prefix-based filter criteria if prefixes you specified in multiple rules overlap

  • Same object qualify tag-based filter criteria specified in multiple rules

For more information, see Replication in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The priority indicates which rule has precedence whenever two or more replication rules conflict. Amazon S3 will attempt to replicate objects according to all replication rules. However, if there are two or more rules with the same destination bucket, then objects will be replicated according to the rule with the highest priority. The higher the number, the higher the priority.

For more information, see Replication in the Amazon Simple Storage Service Developer Guide.
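A hedged sketch of two prioritized rules pointing at the same destination bucket, using the v2 model classes (role ARN, bucket names, and prefixes are placeholders); if both rules match an object, the rule with priority 2 wins:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.DeleteMarkerReplication;
    import software.amazon.awssdk.services.s3.model.DeleteMarkerReplicationStatus;
    import software.amazon.awssdk.services.s3.model.Destination;
    import software.amazon.awssdk.services.s3.model.PutBucketReplicationRequest;
    import software.amazon.awssdk.services.s3.model.ReplicationConfiguration;
    import software.amazon.awssdk.services.s3.model.ReplicationRule;
    import software.amazon.awssdk.services.s3.model.ReplicationRuleFilter;
    import software.amazon.awssdk.services.s3.model.ReplicationRuleStatus;

    public class ReplicationPriorityExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            ReplicationRule broadRule = rule("replicate-everything", 1, "");
            ReplicationRule logsRule = rule("replicate-logs-first", 2, "logs/");

            s3.putBucketReplication(PutBucketReplicationRequest.builder()
                    .bucket("source-bucket")                               // placeholder
                    .replicationConfiguration(ReplicationConfiguration.builder()
                            .role("arn:aws:iam::111122223333:role/replication-role")
                            .rules(broadRule, logsRule)
                            .build())
                    .build());
        }

        private static ReplicationRule rule(String id, int priority, String prefix) {
            return ReplicationRule.builder()
                    .id(id)
                    .priority(priority)                                    // higher number wins on conflict
                    .status(ReplicationRuleStatus.ENABLED)
                    .filter(ReplicationRuleFilter.builder().prefix(prefix).build())
                    .deleteMarkerReplication(DeleteMarkerReplication.builder()
                            .status(DeleteMarkerReplicationStatus.DISABLED)
                            .build())
                    .destination(Destination.builder()
                            .bucket("arn:aws:s3:::destination-bucket")     // placeholder ARN
                            .build())
                    .build();
        }
    }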

" }, "Prefix":{ "shape":"Prefix", @@ -8184,7 +8485,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket name or containing the object to restore.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

The bucket name containing the object to restore.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
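For example (a sketch; the access point ARN and key are placeholders), the generated client accepts an access point ARN anywhere a bucket name is expected, and the endpoint resolvers exercised by the tests later in this diff rewrite the request host accordingly:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
    import software.amazon.awssdk.services.s3.model.HeadObjectResponse;

    public class AccessPointArnExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            // The ARN is passed in place of the bucket name; the SDK resolves
            // it to the AccessPointName-AccountId.s3-accesspoint.Region host.
            HeadObjectResponse response = s3.headObject(HeadObjectRequest.builder()
                    .bucket("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point")
                    .key("archived-object")
                    .build());
            System.out.println(response.contentLength());
        }
    }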

", "location":"uri", "locationName":"Bucket" }, @@ -8225,7 +8526,7 @@ "members":{ "Days":{ "shape":"Days", - "documentation":"

Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

" + "documentation":"

Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

The Days element is required for regular restores, and must not be provided for select requests.
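A brief sketch of a regular restore with the v2 client (bucket and key are placeholders), where Days is set and the retrieval tier is supplied through GlacierJobParameters:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.GlacierJobParameters;
    import software.amazon.awssdk.services.s3.model.RestoreObjectRequest;
    import software.amazon.awssdk.services.s3.model.RestoreRequest;
    import software.amazon.awssdk.services.s3.model.Tier;

    public class RestoreObjectExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            s3.restoreObject(RestoreObjectRequest.builder()
                    .bucket("my-archive-bucket")                 // placeholder
                    .key("2019/backup.tar")                      // placeholder
                    .restoreRequest(RestoreRequest.builder()
                            .days(2)                             // lifetime of the temporary copy
                            .glacierJobParameters(GlacierJobParameters.builder()
                                    .tier(Tier.STANDARD)
                                    .build())
                            .build())
                    .build());
        }
    }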

" }, "GlacierJobParameters":{ "shape":"GlacierJobParameters", @@ -8237,7 +8538,7 @@ }, "Tier":{ "shape":"Tier", - "documentation":"

S3 Glacier retrieval tier at which the restore will be processed.

" + "documentation":"

Retrieval tier at which the restore will be processed.

" }, "Description":{ "shape":"Description", @@ -8594,6 +8895,10 @@ "ApplyServerSideEncryptionByDefault":{ "shape":"ServerSideEncryptionByDefault", "documentation":"

Specifies the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied.

" + }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.

For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
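A sketch of enabling this default at the bucket level through the encryption configuration (bucket name and KMS key ID are placeholders; the bucketKeyEnabled method assumes the usual codegen for the new BucketKeyEnabled member):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionByDefault;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionConfiguration;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryptionRule;

    public class DefaultBucketKeyExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            s3.putBucketEncryption(PutBucketEncryptionRequest.builder()
                    .bucket("my-bucket")                                      // placeholder
                    .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
                            .rules(ServerSideEncryptionRule.builder()
                                    .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
                                            .sseAlgorithm(ServerSideEncryption.AWS_KMS)
                                            .kmsMasterKeyID("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder
                                            .build())
                                    .bucketKeyEnabled(true)                   // new objects use an S3 Bucket Key
                                    .build())
                            .build())
                    .build());
        }
    }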

" } }, "documentation":"

Specifies the default server-side encryption configuration.

" @@ -8611,6 +8916,10 @@ "SseKmsEncryptedObjects":{ "shape":"SseKmsEncryptedObjects", "documentation":"

A container for filter information for the selection of Amazon S3 objects encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, this element is required.

" + }, + "ReplicaModifications":{ + "shape":"ReplicaModifications", + "documentation":"

A filter that you can specify to select modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when Filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas.

If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, this element is not allowed.

" } }, "documentation":"

A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).

" @@ -8787,6 +9096,29 @@ "Expedited" ] }, + "Tiering":{ + "type":"structure", + "required":[ + "Days", + "AccessTier" + ], + "members":{ + "Days":{ + "shape":"IntelligentTieringDays", + "documentation":"

The number of consecutive days of no access after which an object will be eligible to be transitioned to the corresponding tier. The number of days must be at least 90 for the Archive Access tier and at least 180 for the Deep Archive Access tier. The maximum is 2 years (730 days).

" + }, + "AccessTier":{ + "shape":"IntelligentTieringAccessTier", + "documentation":"

S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing frequently and infrequently accessed objects for a list of access tiers in the S3 Intelligent-Tiering storage class.

" + } + }, + "documentation":"

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead.
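As a sketch of how the new Tiering shape might be used with the intelligent-tiering configuration operations generated from this model (configuration ID and bucket name are placeholders):

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringAccessTier;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringConfiguration;
    import software.amazon.awssdk.services.s3.model.IntelligentTieringStatus;
    import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest;
    import software.amazon.awssdk.services.s3.model.Tiering;

    public class IntelligentTieringExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            s3.putBucketIntelligentTieringConfiguration(
                    PutBucketIntelligentTieringConfigurationRequest.builder()
                            .bucket("my-bucket")                              // placeholder
                            .id("archive-config")                             // placeholder
                            .intelligentTieringConfiguration(IntelligentTieringConfiguration.builder()
                                    .id("archive-config")
                                    .status(IntelligentTieringStatus.ENABLED)
                                    .tierings(Tiering.builder()
                                                    .days(90)                 // minimum for Archive Access
                                                    .accessTier(IntelligentTieringAccessTier.ARCHIVE_ACCESS)
                                                    .build(),
                                            Tiering.builder()
                                                    .days(180)                // minimum for Deep Archive Access
                                                    .accessTier(IntelligentTieringAccessTier.DEEP_ARCHIVE_ACCESS)
                                                    .build())
                                    .build())
                            .build());
        }
    }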

" + }, + "TieringList":{ + "type":"list", + "member":{"shape":"Tiering"}, + "flattened":true + }, "Token":{"type":"string"}, "TopicArn":{"type":"string"}, "TopicConfiguration":{ @@ -8917,6 +9249,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).
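For instance (a sketch with placeholder names), the flag can be requested when the multipart upload is created and read back from the response to confirm whether a Bucket Key was used:

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
    import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
    import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

    public class MultipartBucketKeyExample {
        public static void main(String[] args) {
            S3Client s3 = S3Client.create();

            CreateMultipartUploadResponse response = s3.createMultipartUpload(
                    CreateMultipartUploadRequest.builder()
                            .bucket("my-bucket")                      // placeholder
                            .key("large-object.bin")                  // placeholder
                            .serverSideEncryption(ServerSideEncryption.AWS_KMS)
                            .bucketKeyEnabled(true)
                            .build());

            // Echoed back via x-amz-server-side-encryption-bucket-key-enabled.
            System.out.println("Bucket Key used: " + response.bucketKeyEnabled());
        }
    }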

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", @@ -9083,6 +9421,12 @@ "location":"header", "locationName":"x-amz-server-side-encryption-aws-kms-key-id" }, + "BucketKeyEnabled":{ + "shape":"BucketKeyEnabled", + "documentation":"

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with AWS KMS (SSE-KMS).

", + "location":"header", + "locationName":"x-amz-server-side-encryption-bucket-key-enabled" + }, "RequestCharged":{ "shape":"RequestCharged", "location":"header", diff --git a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors index 91ecfda62673..d9d447cb955d 100644 --- a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors +++ b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors @@ -5,7 +5,6 @@ software.amazon.awssdk.services.s3.internal.handlers.CreateMultipartUploadReques software.amazon.awssdk.services.s3.internal.handlers.EnableChunkedEncodingInterceptor software.amazon.awssdk.services.s3.internal.handlers.DisableDoubleUrlEncodingInterceptor software.amazon.awssdk.services.s3.internal.handlers.DecodeUrlEncodedResponseInterceptor -software.amazon.awssdk.services.s3.internal.handlers.AddContentMd5HeaderInterceptor software.amazon.awssdk.services.s3.internal.handlers.GetBucketPolicyInterceptor software.amazon.awssdk.services.s3.internal.handlers.AsyncChecksumValidationInterceptor software.amazon.awssdk.services.s3.internal.handlers.SyncChecksumValidationInterceptor diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/S3EndpointResolutionTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3EndpointResolutionTest.java similarity index 99% rename from services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/S3EndpointResolutionTest.java rename to services/s3/src/test/java/software/amazon/awssdk/services/s3/S3EndpointResolutionTest.java index 26050f8c5b14..3b21d7b0078c 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/S3EndpointResolutionTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3EndpointResolutionTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.services.s3.utils; +package software.amazon.awssdk.services.s3; import static org.assertj.core.api.Assertions.assertThat; diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumResetsOnRetryTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumResetsOnRetryTest.java index 38c4ccd040b9..e3fccda697c7 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumResetsOnRetryTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/checksums/ChecksumResetsOnRetryTest.java @@ -24,6 +24,9 @@ import com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder; import com.github.tomakehurst.wiremock.client.WireMock; +import com.github.tomakehurst.wiremock.common.ConsoleNotifier; +import com.github.tomakehurst.wiremock.common.Slf4jNotifier; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; import com.github.tomakehurst.wiremock.junit.WireMockRule; import com.github.tomakehurst.wiremock.stubbing.Scenario; import java.net.URI; @@ -50,7 +53,8 @@ */ public class ChecksumResetsOnRetryTest { @Rule - public WireMockRule mockServer = new WireMockRule(0); + public WireMockRule mockServer = new WireMockRule(new WireMockConfiguration().port(0) + .notifier(new ConsoleNotifier(true))); private S3Client s3Client; diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolverTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolverTest.java new file mode 100644 index 000000000000..b37df4beea9d --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3AccessPointEndpointResolverTest.java @@ -0,0 +1,563 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static software.amazon.awssdk.utils.http.SdkHttpUtils.urlEncode; + +import java.net.URI; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.utils.InterceptorTestUtils; + +public class S3AccessPointEndpointResolverTest { + + S3AccessPointEndpointResolver endpointResolver; + + @Before + public void setUp() { + endpointResolver = S3AccessPointEndpointResolver.create(); + } + + @Test + public void accesspointArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + } + + @Test + public void accesspointArn_futureUnknownRegion_US_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3:us-future-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.us-future-1.amazonaws.com", + Region.of("us-future-1"), + S3Configuration.builder(), + Region.of("us-future-1")); + } + + @Test + public void accesspointArn_futureUnknownRegion_crossRegion_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3:us-future-2:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.us-future-2.amazonaws.com", + Region.of("us-future-2"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-future-1")); + } + + @Test + public void accesspointArn_futureUnknownRegion_CN_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws-cn:s3:cn-future-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.cn-future-1.amazonaws.com.cn", + Region.of("cn-future-1"), + S3Configuration.builder(), + Region.of("cn-future-1")); + } + + @Test + public void accesspointArn_futureUnknownRegionAndPartition_defaultsToAws() { + verifyAccesspointArn("http", + "arn:aws:s3:unknown:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.unknown.amazonaws.com", + Region.of("unknown"), + S3Configuration.builder(), + Region.of("unknown")); + } + + @Test + public void malformedArn_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("ARN"); + } + + @Test + public void unsupportedArn_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:unsupported:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("ARN"); + } + + @Test + public void accesspointArn_invalidPartition_throwsIllegalArgumentException() { + 
assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:bar:s3:us-east-1:12345678910:accesspoint:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("bar"); + } + + @Test + public void bucketArn_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:bucket_name:foobar", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("bucket parameter"); + } + + + @Test + public void accesspointArn_withSlashes_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + S3Configuration.builder()); + } + + @Test + public void accesspointArn_withDualStackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + } + + @Test + public void accesspointArn_withCnPartition_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", + Region.of("cn-north-1"), + S3Configuration.builder(), + Region.of("cn-north-1")); + verifyAccesspointArn("https", + "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", + "https://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", + Region.of("cn-north-1"), + S3Configuration.builder(), + Region.of("cn-north-1")); + } + + @Test + public void accesspointArn_withDifferentPartition_useArnRegionEnabled_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", + "http://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", + Region.of("cn-north-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("partition"); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_noFipsInArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-east-1")); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_FipsInArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + 
S3Configuration.builder(), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-east-1")); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_noFipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + } + + + @Test + public void accesspointArn_withFipsRegionPrefix_FipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1")); + } + + + + @Test + public void accesspointArn_withFipsRegionPrefix_ArnRegionNotMatches_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-gov-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("The region field of the ARN being passed as a bucket parameter to an S3 operation does not match the region the client was configured with."); + assertThatThrownBy(() -> verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("fips-us-gov-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("The region field of the ARN being passed as a bucket parameter to an S3 operation does not match the region the client was configured with."); + } + + @Test + public void accesspointArn_withFipsRegionPrefix_noFipsInArn_DualstackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + } + + @Test + public void 
accesspointArn_withFipsRegionPrefix_FipsInArn_DualStackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("fips-us-east-1")); + } + + @Test + public void accesspointArn_withFipsRegionSuffix_noFipsinArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1-fips")); + verifyAccesspointArn("https", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1-fips")); + } + + @Test + public void accesspointArn_noFipsRegionPrefix_FipsInArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder(), + Region.of("us-east-1")); + } + + @Test + public void accesspointArn_noFipsRegionPrefix_FipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-east-1")); + } + + @Test + public void accesspointArn_noFipsRegionPrefix_FipsInArn_useArnRegionEnabled_DualstackEnabled_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true).dualstackEnabled(true), + Region.of("us-east-1")); + verifyAccesspointArn("https", + "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", + "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", + Region.of("fips-us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true).dualstackEnabled(true), + Region.of("us-east-1")); + } + + @Test + public void accesspointArn_withAccelerateEnabled_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + 
"http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().accelerateModeEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accelerate"); + } + + + @Test + public void accesspointArn_withPathStyleAddressingEnabled_shouldThrowIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", + "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", + Region.of("us-east-1"), + S3Configuration.builder().pathStyleAccessEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("path style"); + } + + @Test + public void outpostAccessPointArn_shouldConvertEndpoint() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com", + Region.of("us-west-2"), + S3Configuration.builder(), + Region.of("us-west-2")); + + verifyAccesspointArn("https", + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com", + Region.of("us-west-2"), + S3Configuration.builder(), + Region.of("us-west-2")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegion_US_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-future-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-future-2.amazonaws.com", + Region.of("us-future-2"), + S3Configuration.builder(), + Region.of("us-future-2")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegion_crossRegion_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-future-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-future-2.amazonaws.com", + Region.of("us-future-2"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("us-future-1")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegion_CN_correctlyInfersPartition() { + verifyAccesspointArn("http", + "arn:aws-cn:s3-outposts:cn-future-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.cn-future-1.amazonaws.com.cn", + Region.of("cn-future-1"), + S3Configuration.builder(), + Region.of("cn-future-1")); + } + + @Test + public void outpostAccessPointArn_futureUnknownRegionAndPartition_defaultsToAws() { + verifyAccesspointArn("http", + "arn:aws:s3-outposts:unknown:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.unknown.amazonaws.com", + Region.of("unknown"), + S3Configuration.builder(), + Region.of("unknown")); + } + + @Test + public void outpostAccessPointArn_invalidPartition_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:bar:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + 
.hasMessageContaining("bar"); + } + + @Test + public void outpostAccessPointArn_differentRegionWithoutUseArnRegion_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:bar:aws-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + S3Configuration.builder())) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("region"); + } + + @Test + public void outpostAccessPointArn_fipsEnabled_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + Region.of("us-east-1"), + S3Configuration.builder().useArnRegionEnabled(true), + Region.of("fips-us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("FIPS"); + } + + @Test + public void outpostAccessPointArn_dualStackEnabled_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + Region.of("us-east-1"), + S3Configuration.builder().dualstackEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("dualstack"); + } + + @Test + public void outpostAccessPointArn_accelerateEnabled_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + null, + Region.of("us-east-1"), + S3Configuration.builder().accelerateModeEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("accelerate"); + } + + @Test + public void outpostAccessPointArn_ArnMissingAccesspointName_throwsIllegalArgumentException() { + assertThatThrownBy(() -> verifyAccesspointArn("http", + "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456", + null, + Region.of("us-east-1"), + S3Configuration.builder().accelerateModeEnabled(true), + Region.of("us-east-1"))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Invalid format"); + } + + private void verifyAccesspointArn(String protocol, String accessPointArn, String expectedEndpoint, + S3Configuration.Builder builder) { + verifyAccesspointArn(protocol, accessPointArn, expectedEndpoint, Region.US_EAST_1, builder, Region.US_EAST_1); + } + + private void verifyAccesspointArn(String protocol, String accessPointArn, String expectedEndpoint, + Region expectedSigningRegion, + S3Configuration.Builder configBuilder, Region region) { + String key = "test-key"; + + URI customUri = URI.create(String.format("%s://s3-test.com/%s/%s", protocol, urlEncode(accessPointArn), key)); + URI expectedUri = URI.create(String.format("%s/%s", expectedEndpoint, key)); + PutObjectRequest putObjectRequest = PutObjectRequest.builder() + .bucket(accessPointArn) + .key(key) + .build(); + + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(putObjectRequest) + .region(region) + .serviceConfiguration(configBuilder.build()) + .build(); + + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(expectedUri); + 
assertThat(sdkHttpFullRequest.signingRegionModification()).isPresent(); + assertThat(sdkHttpFullRequest.signingRegionModification().get()).isEqualTo(expectedSigningRegion); + assertSigningRegion(accessPointArn, sdkHttpFullRequest); + } + + private void assertSigningRegion(String accessPointArn, ConfiguredS3SdkHttpRequest sdkHttpFullRequest) { + if (accessPointArn.contains(":s3-outposts")) { + String expectedSigningName = "s3-outposts"; + assertThat(sdkHttpFullRequest.signingServiceModification()).isPresent(); + assertThat(sdkHttpFullRequest.signingServiceModification().get()).isEqualTo(expectedSigningName); + } else { + assertThat(sdkHttpFullRequest.signingServiceModification()).isEmpty(); + } + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolverTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolverTest.java new file mode 100644 index 000000000000..9acfa4032800 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3BucketEndpointResolverTest.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.URI; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.internal.ConfiguredS3SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; +import software.amazon.awssdk.services.s3.model.ListBucketsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.utils.InterceptorTestUtils; + +public class S3BucketEndpointResolverTest { + + S3BucketEndpointResolver endpointResolver; + + @Before + public void setUp() throws Exception { + endpointResolver = S3BucketEndpointResolver.create(); + } + + @Test + public void traditionalEndpoint_shouldNotConvertEndpoint() { + verifyEndpoint("http", "http://s3-test.com", S3Configuration.builder()); + verifyEndpoint("https", "https://s3-test.com", S3Configuration.builder()); + } + + @Test + public void accelerateEnabled_shouldConvertToAccelerateEndpoint() { + verifyEndpoint("http", + "http://s3-accelerate.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true)); + verifyEndpoint("https", "https://s3-accelerate.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true)); + } + + @Test + public void 
bothAccelerateDualstackEnabled_shouldConvertToAccelerateDualstackEndpoint() { + verifyEndpoint("http", + "http://s3-accelerate.dualstack.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true).dualstackEnabled(true) + ); + verifyEndpoint("https", + "https://s3-accelerate.dualstack.amazonaws.com", + S3Configuration.builder().accelerateModeEnabled(true).dualstackEnabled(true)); + } + + @Test + public void pathStyleAccessEnabled_shouldNotConvertToDnsEndpoint() { + verifyEndpoint("http", + "http://s3-test.com", + S3Configuration.builder().pathStyleAccessEnabled(true)); + verifyEndpoint("https", + "https://s3-test.com", + S3Configuration.builder().pathStyleAccessEnabled(true)); + } + + @Test + public void dualstackEnabled_shouldConvertToDualstackEndpoint() { + verifyEndpoint("http", "http://s3.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + verifyEndpoint("https", "https://s3.dualstack.us-east-1.amazonaws.com", + S3Configuration.builder().dualstackEnabled(true)); + } + + @Test + public void accelerateEnabled_ListBucketRequest_shouldNotConvertToAccelerateEndpoint() { + verifyAccelerateDisabledOperationsEndpointNotConverted(ListBucketsRequest.builder().build()); + } + + @Test + public void accelerateEnabled_CreateBucketsRequest_shouldNotConvertToAccelerateEndpoint() { + verifyAccelerateDisabledOperationsEndpointNotConverted(CreateBucketRequest.builder().build()); + } + + @Test + public void accelerateEnabled_DeleteBucketRequest_shouldNotConvertToAccelerateEndpoint() { + verifyAccelerateDisabledOperationsEndpointNotConverted(DeleteBucketRequest.builder().build()); + } + + @Test + public void virtualStyle_shouldConvertToDnsEndpoint() { + verifyVirtualStyleConvertDnsEndpoint("https"); + verifyVirtualStyleConvertDnsEndpoint("http"); + } + + private void verifyVirtualStyleConvertDnsEndpoint(String protocol) { + String bucketName = "test-bucket"; + String key = "test-key"; + URI customUri = URI.create(String.format("%s://s3-test.com/%s/%s", protocol, bucketName, key)); + URI expectedUri = URI.create(String.format("%s://%s.s3.dualstack.us-east-1.amazonaws.com/%s", protocol, + bucketName, key)); + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(ListObjectsV2Request.builder().bucket(bucketName).build()) + .region(Region.US_EAST_1) + .serviceConfiguration(S3Configuration.builder().dualstackEnabled(true).build()) + .build(); + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(expectedUri); + } + + private void verifyAccelerateDisabledOperationsEndpointNotConverted(SdkRequest request) { + URI customUri = URI.create("http://s3-test.com"); + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(request) + .region(Region.US_EAST_1) + .serviceConfiguration(S3Configuration.builder().accelerateModeEnabled(true).build()) + .build(); + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(customUri); + } + + private void verifyEndpoint(String protocol, + String expectedEndpoint, + S3Configuration.Builder configBuilder) { + String bucket = "test-bucket"; + String key = "test-key"; + URI customUri = 
URI.create(String.format("%s://s3-test.com/%s/%s", protocol, bucket, key)); + URI expectedUri = URI.create(String.format("%s/%s/%s", expectedEndpoint, bucket, key)); + + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .request(InterceptorTestUtils.sdkHttpRequest(customUri)) + .originalRequest(PutObjectRequest.builder().build()) + .region(Region.US_EAST_1) + .serviceConfiguration(configBuilder.build()) + .build(); + + ConfiguredS3SdkHttpRequest sdkHttpFullRequest = endpointResolver.applyEndpointConfiguration(context); + assertThat(sdkHttpFullRequest.sdkHttpRequest().getUri()).isEqualTo(expectedUri); + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContextTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContextTest.java new file mode 100644 index 000000000000..9b846ad9cda2 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverContextTest.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; + +public class S3EndpointResolverContextTest { + + @Test + public void toBuilder_minimal() { + S3EndpointResolverContext context = S3EndpointResolverContext.builder().build(); + assertFalse(context.endpointOverridden()); + assertNull(context.originalRequest()); + assertNull(context.region()); + assertNull(context.serviceConfiguration()); + assertNull(context.request()); + } + + @Test + public void toBuilder_maximal() { + S3Configuration serviceConfiguration = S3Configuration.builder().build(); + SdkHttpFullRequest httpRequest = SdkHttpFullRequest.builder().protocol("http").host("host").method(SdkHttpMethod.POST).build(); + S3EndpointResolverContext context = S3EndpointResolverContext.builder() + .endpointOverridden(true) + .originalRequest(PutObjectRequest.builder().build()) + .region(Region.US_EAST_1) + .serviceConfiguration(serviceConfiguration) + .request(httpRequest) + .build(); + assertTrue(context.endpointOverridden()); + assertThat(context.originalRequest()).isInstanceOf(PutObjectRequest.class); + assertThat(context.region()).isEqualTo(Region.US_EAST_1); + assertThat(context.serviceConfiguration()).isEqualTo(serviceConfiguration); + assertThat(context.request()).isEqualTo(httpRequest); + } +} diff --git 
a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactoryTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactoryTest.java new file mode 100644 index 000000000000..ad5f86b8dd06 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointResolverFactoryTest.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.Test; + +public class S3EndpointResolverFactoryTest { + + @Test + public void nullBucketName_returnsBucketEndpointResolver() { + assertThat(S3EndpointResolverFactory.getEndpointResolver(null)).isInstanceOf(S3BucketEndpointResolver.class); + } + + @Test + public void emptyBucketName_returnsBucketEndpointResolver() { + String bucketName = ""; + assertThat(S3EndpointResolverFactory.getEndpointResolver(bucketName)).isInstanceOf(S3BucketEndpointResolver.class); + } + + @Test + public void nonAccessPointBucketName_returnsBucketEndpointResolver() { + String bucketName = "test-bucket"; + assertThat(S3EndpointResolverFactory.getEndpointResolver(bucketName)).isInstanceOf(S3BucketEndpointResolver.class); + } + + @Test + public void accessPointBucketName_returnsAccessPointEndpointResolver() { + String bucketName = "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar"; + assertThat(S3EndpointResolverFactory.getEndpointResolver(bucketName)).isInstanceOf(S3AccessPointEndpointResolver.class); + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtilsTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtilsTest.java new file mode 100644 index 000000000000..c2983aef2d12 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/endpoints/S3EndpointUtilsTest.java @@ -0,0 +1,124 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.endpoints; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.net.URI; +import org.junit.Test; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.S3Configuration; +import software.amazon.awssdk.services.s3.model.ListBucketsRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; + +public class S3EndpointUtilsTest { + + @Test + public void removesFipsIfNeeded() { + assertThat(S3EndpointUtils.removeFipsIfNeeded("fips-us-east-1")).isEqualTo("us-east-1"); + assertThat(S3EndpointUtils.removeFipsIfNeeded("us-east-1-fips")).isEqualTo("us-east-1"); + } + + @Test + public void isFipsRegion() { + assertTrue(S3EndpointUtils.isFipsRegion("fips-us-east-1")); + assertTrue(S3EndpointUtils.isFipsRegion("us-east-1-fips")); + assertFalse(S3EndpointUtils.isFipsRegion("us-fips-1")); + } + + @Test + public void isFipsRegionProvided() { + assertTrue(S3EndpointUtils.isFipsRegionProvided("fips-us-east-1", "us-east-1", false)); + assertFalse(S3EndpointUtils.isFipsRegionProvided("us-east-1", "fips-us-east-1", false)); + assertTrue(S3EndpointUtils.isFipsRegionProvided("us-east-1", "us-east-1-fips", true)); + assertFalse(S3EndpointUtils.isFipsRegionProvided("us-east-1-fips", "us-east-1", true)); + } + + @Test + public void isAccelerateEnabled() { + assertFalse(S3EndpointUtils.isAccelerateEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isAccelerateEnabled(null)); + assertFalse(S3EndpointUtils.isAccelerateEnabled(S3Configuration.builder().accelerateModeEnabled(false).build())); + assertTrue(S3EndpointUtils.isAccelerateEnabled(S3Configuration.builder().accelerateModeEnabled(true).build())); + } + + @Test + public void isAccelerateSupported() { + assertFalse(S3EndpointUtils.isAccelerateSupported(ListBucketsRequest.builder().build())); + assertTrue(S3EndpointUtils.isAccelerateSupported(PutObjectRequest.builder().build())); + } + + @Test + public void accelerateEndpoint() { + assertThat(S3EndpointUtils.accelerateEndpoint(S3Configuration.builder().build(), + "domain", + "https")) + .isEqualTo(URI.create("https://s3-accelerate.domain")); + + assertThat(S3EndpointUtils.accelerateEndpoint(S3Configuration.builder().dualstackEnabled(true).build(), + "domain", + "https")) + .isEqualTo(URI.create("https://s3-accelerate.dualstack.domain")); + } + + @Test + public void isDualstackEnabled() { + assertFalse(S3EndpointUtils.isDualstackEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isDualstackEnabled(null)); + assertFalse(S3EndpointUtils.isDualstackEnabled(S3Configuration.builder().dualstackEnabled(false).build())); + assertTrue(S3EndpointUtils.isDualstackEnabled(S3Configuration.builder().dualstackEnabled(true).build())); + } + + @Test + public void dualStackEndpoint() { + assertThat(S3EndpointUtils.dualstackEndpoint("id", "domain", "https")) + .isEqualTo(URI.create("https://s3.dualstack.id.domain")); + } + + @Test + public void isPathstyleAccessEnabled() { + assertFalse(S3EndpointUtils.isPathStyleAccessEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isPathStyleAccessEnabled(null)); + assertFalse(S3EndpointUtils.isPathStyleAccessEnabled(S3Configuration.builder().pathStyleAccessEnabled(false).build())); + 
assertTrue(S3EndpointUtils.isPathStyleAccessEnabled(S3Configuration.builder().pathStyleAccessEnabled(true).build())); + } + + @Test + public void isArnRegionEnabled() { + assertFalse(S3EndpointUtils.isArnRegionEnabled(S3Configuration.builder().build())); + assertFalse(S3EndpointUtils.isArnRegionEnabled(null)); + assertFalse(S3EndpointUtils.isArnRegionEnabled(S3Configuration.builder().useArnRegionEnabled(false).build())); + assertTrue(S3EndpointUtils.isArnRegionEnabled(S3Configuration.builder().useArnRegionEnabled(true).build())); + } + + @Test + public void changeToDnsEndpoint() { + SdkHttpRequest.Builder mutableRequest = SdkHttpFullRequest.builder().host("s3").encodedPath("/test-bucket"); + S3EndpointUtils.changeToDnsEndpoint(mutableRequest, "test-bucket"); + assertThat(mutableRequest.host()).isEqualTo("test-bucket.s3"); + assertThat(mutableRequest.encodedPath()).isEqualTo(""); + } + + @Test + public void isArn() { + assertFalse(S3EndpointUtils.isArn("bucketName")); + assertFalse(S3EndpointUtils.isArn("test:arn:")); + assertTrue(S3EndpointUtils.isArn("arn:test")); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java index 47cba5b80f0f..c6c20fb17057 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/EndpointAddressInterceptorTest.java @@ -16,7 +16,6 @@ package software.amazon.awssdk.services.s3.internal.handlers; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; import static software.amazon.awssdk.awscore.AwsExecutionAttribute.AWS_REGION; @@ -24,704 +23,85 @@ import static software.amazon.awssdk.utils.http.SdkHttpUtils.urlEncode; import java.net.URI; -import java.util.Optional; +import org.junit.Before; import org.junit.Test; -import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; -import software.amazon.awssdk.core.SdkRequest; -import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpFullRequest; -import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Configuration; -import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; -import software.amazon.awssdk.services.s3.model.ListBucketsRequest; -import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.utils.InterceptorTestUtils; public class EndpointAddressInterceptorTest { - private final EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); + private static final String AP_ARN = "arn:aws:s3:us-west-2:123456789012:accesspoint:foobar"; + 
private static final String OUTPOSTS_ARN = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456" + + ":accesspoint:myaccesspoint"; + private static final String KEY = "test-key"; + private static final String DEFAULT_SIGNING_NAME = "s3"; + private static final String OUTPOSTS_SIGNING_NAME = "s3-outposts"; + private static final Region DEFAULT_REGION = Region.US_WEST_2; - @Test - public void traditionalEndpoint_shouldNotConvertEndpoint() { - verifyEndpoint("http", "http://s3-test.com", - S3Configuration.builder()); - - verifyEndpoint("https", "https://s3-test.com", - S3Configuration.builder()); - } - - @Test - public void accelerateEnabled_shouldConvertToAccelerateEndpoint() { - verifyEndpoint("http", "http://s3-accelerate.amazonaws.com", - S3Configuration.builder().accelerateModeEnabled(true)); - verifyEndpoint("https", "https://s3-accelerate.amazonaws.com", - S3Configuration.builder().accelerateModeEnabled(true)); - } - - @Test - public void bothAccelerateDualstackEnabled_shouldConvertToAccelerateDualstackEndpoint() { - S3Configuration.Builder configurationBuilder = S3Configuration.builder() - .dualstackEnabled(true) - .accelerateModeEnabled(true); - verifyEndpoint("http", - "http://s3-accelerate.dualstack.amazonaws.com", - S3Configuration.builder() - .accelerateModeEnabled(true) - .dualstackEnabled(true) - ); - verifyEndpoint("https", - "https://s3-accelerate.dualstack.amazonaws.com", - configurationBuilder); - } - - @Test - public void accelerateEnabled_ListBucketRequest_shouldNotConvertToAccelerateEndpoint() { - verifyAccelerateDisabledOperationsEndpointNotConverted(ListBucketsRequest.builder().build()); - } - - @Test - public void accelerateEnabled_CreateBucketsRequest_shouldNotConvertToAccelerateEndpoint() { - verifyAccelerateDisabledOperationsEndpointNotConverted(CreateBucketRequest.builder().build()); - } - - @Test - public void accelerateEnabled_DeleteBucketRequest_shouldNotConvertToAccelerateEndpoint() { - verifyAccelerateDisabledOperationsEndpointNotConverted(DeleteBucketRequest.builder().build()); - } - - @Test - public void dualstackEnabled_shouldConvertToDualstackEndpoint() { - verifyEndpoint("http", "http://s3.dualstack.us-east-1.amazonaws.com", - S3Configuration.builder().dualstackEnabled(true)); - verifyEndpoint("https", "https://s3.dualstack.us-east-1.amazonaws.com", - S3Configuration.builder().dualstackEnabled(true)); - } - - @Test - public void virtualStyle_shouldConvertToDnsEndpoint() { - verifyVirtualStyleConvertDnsEndpoint("https"); - verifyVirtualStyleConvertDnsEndpoint("http"); - } - - @Test - public void pathStyleAccessEnabled_shouldNotConvertToDnsEndpoint() { - verifyEndpoint("http", "http://s3-test.com", - S3Configuration.builder().pathStyleAccessEnabled(true)); - verifyEndpoint("https", "https://s3-test.com", - S3Configuration.builder().pathStyleAccessEnabled(true)); - } - - @Test - public void accesspointArn_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar", - "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - S3Configuration.builder()); - verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar", - "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - S3Configuration.builder()); - } - - @Test - public void accesspointArn_futureUnknownRegion_US_correctlyInfersPartition() { - verifyAccesspointArn("http", - "arn:aws:s3:us-future-1:12345678910:accesspoint:foobar", - 
"http://foobar-12345678910.s3-accesspoint.us-future-1.amazonaws.com", - Region.of("us-future-1"), - S3Configuration.builder(), - Region.of("us-future-1")); - } - - @Test - public void accesspointArn_futureUnknownRegion_crossRegion_correctlyInfersPartition() { - verifyAccesspointArn("http", - "arn:aws:s3:us-future-2:12345678910:accesspoint:foobar", - "http://foobar-12345678910.s3-accesspoint.us-future-2.amazonaws.com", - Region.of("us-future-2"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("us-future-1")); - } - - @Test - public void accesspointArn_futureUnknownRegion_CN_correctlyInfersPartition() { - verifyAccesspointArn("http", - "arn:aws-cn:s3:cn-future-1:12345678910:accesspoint:foobar", - "http://foobar-12345678910.s3-accesspoint.cn-future-1.amazonaws.com.cn", - Region.of("cn-future-1"), - S3Configuration.builder(), - Region.of("cn-future-1")); - } - - @Test - public void accesspointArn_futureUnknownRegionAndPartition_defaultsToAws() { - verifyAccesspointArn("http", - "arn:aws:s3:unknown:12345678910:accesspoint:foobar", - "http://foobar-12345678910.s3-accesspoint.unknown.amazonaws.com", - Region.of("unknown"), - S3Configuration.builder(), - Region.of("unknown")); - } - - @Test - public void malformedArn_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:foobar", - null, - S3Configuration.builder())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("ARN"); - } - - @Test - public void unsupportedArn_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:unsupported:foobar", - null, - S3Configuration.builder())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("ARN"); - } - - @Test - public void accesspointArn_invalidPartition_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:bar:s3:us-east-1:12345678910:accesspoint:foobar", - null, - S3Configuration.builder())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("bar"); - } - - @Test - public void bucketArn_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:bucket_name:foobar", - null, - S3Configuration.builder())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("bucket parameter"); - } - - - @Test - public void accesspointArn_withSlashes_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - S3Configuration.builder()); - verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - S3Configuration.builder()); - } - - @Test - public void accesspointArn_withDualStackEnabled_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.dualstack.us-east-1.amazonaws.com", - S3Configuration.builder().dualstackEnabled(true)); - verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.dualstack.us-east-1.amazonaws.com", - S3Configuration.builder().dualstackEnabled(true)); - } - - @Test - public void accesspointArn_withCnPartition_shouldConvertEndpoint() { - verifyAccesspointArn("http", - 
"arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", - "http://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", - Region.of("cn-north-1"), - S3Configuration.builder(), - Region.of("cn-north-1")); - verifyAccesspointArn("https", - "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", - "https://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", - Region.of("cn-north-1"), - S3Configuration.builder(), - Region.of("cn-north-1")); - } - - @Test - public void accesspointArn_withDifferentPartition_useArnRegionEnabled_shouldThrowIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws-cn:s3:cn-north-1:12345678910:accesspoint:foobar", - "http://foobar-12345678910.s3-accesspoint.cn-north-1.amazonaws.com.cn", - Region.of("cn-north-1"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("us-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("partition"); - } - - @Test - public void accesspointArn_withFipsRegionPrefix_noFipsInArn_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder(), - Region.of("fips-us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder(), - Region.of("fips-us-east-1")); - } - - @Test - public void accesspointArn_withFipsRegionPrefix_FipsInArn_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder(), - Region.of("fips-us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder(), - Region.of("fips-us-east-1")); - } - - @Test - public void accesspointArn_withFipsRegionPrefix_noFipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("fips-us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("fips-us-east-1")); - } - - - @Test - public void accesspointArn_withFipsRegionPrefix_FipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("fips-us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true), - 
Region.of("fips-us-east-1")); - } + private EndpointAddressInterceptor interceptor; - - - @Test - public void accesspointArn_withFipsRegionPrefix_ArnRegionNotMatches_shouldThrowIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder(), - Region.of("fips-us-gov-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("The region field of the ARN being passed as a bucket parameter to an S3 operation does not match the region the client was configured with."); - assertThatThrownBy(() -> verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder(), - Region.of("fips-us-gov-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("The region field of the ARN being passed as a bucket parameter to an S3 operation does not match the region the client was configured with."); - } - - @Test - public void accesspointArn_withFipsRegionPrefix_noFipsInArn_DualstackEnabled_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder().dualstackEnabled(true), - Region.of("fips-us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder().dualstackEnabled(true), - Region.of("fips-us-east-1")); - } - - @Test - public void accesspointArn_withFipsRegionPrefix_FipsInArn_DualStackEnabled_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().dualstackEnabled(true), - Region.of("fips-us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().dualstackEnabled(true), - Region.of("fips-us-east-1")); - } - - @Test - public void accesspointArn_withFipsRegionSuffix_noFipsinArn_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder(), - Region.of("us-east-1-fips")); - verifyAccesspointArn("https", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder(), - Region.of("us-east-1-fips")); - } - - @Test - public void accesspointArn_noFipsRegionPrefix_FipsInArn_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder(), - Region.of("us-east-1")); - 
verifyAccesspointArn("https", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder(), - Region.of("us-east-1")); - } - - @Test - public void accesspointArn_noFipsRegionPrefix_FipsInArn_useArnRegionEnabled_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("us-east-1")); - } - - @Test - public void accesspointArn_noFipsRegionPrefix_FipsInArn_useArnRegionEnabled_DualstackEnabled_shouldConvertEndpoint() { - verifyAccesspointArn("http", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true).dualstackEnabled(true), - Region.of("us-east-1")); - verifyAccesspointArn("https", - "arn:aws:s3:fips-us-east-1:12345678910:accesspoint/foobar", - "https://foobar-12345678910.s3-accesspoint.dualstack.fips-us-east-1.amazonaws.com", - Region.of("fips-us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true).dualstackEnabled(true), - Region.of("us-east-1")); - } - - @Test - public void accesspointArn_withAccelerateEnabled_shouldThrowIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder().accelerateModeEnabled(true), - Region.of("us-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("accelerate"); - } - - - @Test - public void accesspointArn_withPathStyleAddressingEnabled_shouldThrowIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3:us-east-1:12345678910:accesspoint/foobar", - "http://foobar-12345678910.s3-accesspoint.us-east-1.amazonaws.com", - Region.of("us-east-1"), - S3Configuration.builder().pathStyleAccessEnabled(true), - Region.of("us-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("path style"); - } - - @Test - public void outpostAccessPointArn_shouldConvertEndpoint() { - - verifyAccesspointArn("http", - "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com", - Region.of("us-west-2"), - S3Configuration.builder(), - Region.of("us-west-2")); - - verifyAccesspointArn("https", - "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "https://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com", - Region.of("us-west-2"), - S3Configuration.builder(), - Region.of("us-west-2")); - } - - @Test - public void outpostAccessPointArn_futureUnknownRegion_US_correctlyInfersPartition() { - verifyAccesspointArn("http", - 
"arn:aws:s3-outposts:us-future-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-future-2.amazonaws.com", - Region.of("us-future-2"), - S3Configuration.builder(), - Region.of("us-future-2")); - } - - @Test - public void outpostAccessPointArn_futureUnknownRegion_crossRegion_correctlyInfersPartition() { - verifyAccesspointArn("http", - "arn:aws:s3-outposts:us-future-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-future-2.amazonaws.com", - Region.of("us-future-2"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("us-future-1")); - } - - @Test - public void outpostAccessPointArn_futureUnknownRegion_CN_correctlyInfersPartition() { - verifyAccesspointArn("http", - "arn:aws-cn:s3-outposts:cn-future-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.cn-future-1.amazonaws.com.cn", - Region.of("cn-future-1"), - S3Configuration.builder(), - Region.of("cn-future-1")); - } - - @Test - public void outpostAccessPointArn_futureUnknownRegionAndPartition_defaultsToAws() { - verifyAccesspointArn("http", - "arn:aws:s3-outposts:unknown:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.unknown.amazonaws.com", - Region.of("unknown"), - S3Configuration.builder(), - Region.of("unknown")); + @Before + public void setUp() throws Exception { + interceptor = new EndpointAddressInterceptor(); } @Test - public void outpostAccessPointArn_invalidPartition_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:bar:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - null, - S3Configuration.builder())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("bar"); - } + public void accesspointArn_shouldReturnStandardRequest() { + ExecutionAttributes executionAttributes = createExecutionAttributes(S3Configuration.builder(), DEFAULT_REGION); + SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(createContext(AP_ARN), executionAttributes); - @Test - public void outpostAccessPointArn_differentRegionWithoutUseArnRegion_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:bar:aws-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - null, - S3Configuration.builder())) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("region"); - } - - @Test - public void outpostAccessPointArn_fipsEnabled_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - null, - Region.of("us-east-1"), - S3Configuration.builder().useArnRegionEnabled(true), - Region.of("fips-us-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("FIPS"); + String expectedEndpoint = "http://foobar-123456789012.s3-accesspoint.us-west-2.amazonaws.com"; + assertThat(sdkHttpFullRequest.getUri()).isEqualTo(uri(expectedEndpoint)); + assertThat(executionAttributes.getAttribute(SIGNING_REGION)).isEqualTo(Region.US_WEST_2); + 
assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo(DEFAULT_SIGNING_NAME); } @Test - public void outpostAccessPointArn_dualStackEnabled_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - null, - Region.of("us-east-1"), - S3Configuration.builder().dualstackEnabled(true), - Region.of("us-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("dualstack"); - } + public void outpostAccessPointArn_sameRegion_shouldRegion() { + ExecutionAttributes executionAttributes = createExecutionAttributes(S3Configuration.builder(), DEFAULT_REGION); + SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(createContext(OUTPOSTS_ARN), executionAttributes); - @Test - public void outpostAccessPointArn_accelerateEnabled_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - null, - Region.of("us-east-1"), - S3Configuration.builder().accelerateModeEnabled(true), - Region.of("us-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("accelerate"); + String expectedEndpoint = "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com"; + assertThat(sdkHttpFullRequest.getUri()).isEqualTo(uri(expectedEndpoint)); + assertThat(executionAttributes.getAttribute(SIGNING_REGION)).isEqualTo(Region.US_WEST_2); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo(OUTPOSTS_SIGNING_NAME); } @Test - public void outpostAccessPointArn_ArnMissingAccesspointName_throwsIllegalArgumentException() { - assertThatThrownBy(() -> verifyAccesspointArn("http", - "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456", - null, - Region.of("us-east-1"), - S3Configuration.builder().accelerateModeEnabled(true), - Region.of("us-east-1"))) - .isInstanceOf(IllegalArgumentException.class) - .hasMessageContaining("Invalid format"); - } - - private void verifyVirtualStyleConvertDnsEndpoint(String protocol) { - String bucketName = "test-bucket"; - String key = "test-key"; - URI customUri = URI.create(String.format("%s://s3-test.com/%s/%s", protocol, bucketName, key)); - URI expectedUri = URI.create(String.format("%s://%s.s3.dualstack.us-east-1.amazonaws.com/%s", protocol, - bucketName, key)); - - Context.ModifyHttpRequest ctx = context(ListObjectsV2Request.builder().bucket(bucketName).build(), - sdkHttpRequest(customUri)); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - S3Configuration s3Configuration = S3Configuration.builder().dualstackEnabled(true).build(); - - executionAttributes.putAttribute(SERVICE_CONFIG, s3Configuration); - executionAttributes.putAttribute(AWS_REGION, Region.US_EAST_1); - - SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(ctx, executionAttributes); - - assertThat(sdkHttpFullRequest.getUri()).isEqualTo(expectedUri); - } + public void outpostAccessPointArn_crossRegion_ArnRegionEnabled_correctlyInfersPartition() { + ExecutionAttributes executionAttributes = createExecutionAttributes(S3Configuration.builder().useArnRegionEnabled(true), + Region.US_EAST_1); + SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(createContext(OUTPOSTS_ARN), executionAttributes); - private SdkHttpRequest sdkHttpRequest(URI customUri) { - return 
SdkHttpFullRequest.builder() - .protocol(customUri.getScheme()) - .host(customUri.getHost()) - .port(customUri.getPort()) - .method(SdkHttpMethod.GET) - .encodedPath(customUri.getPath()) - .build(); + String expectedEndpoint = "http://myaccesspoint-123456789012.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com"; + assertThat(sdkHttpFullRequest.getUri()).isEqualTo(uri(expectedEndpoint)); + assertThat(executionAttributes.getAttribute(SIGNING_REGION)).isEqualTo(Region.US_WEST_2); + assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo(OUTPOSTS_SIGNING_NAME); } - private void verifyAccelerateDisabledOperationsEndpointNotConverted(SdkRequest request) { - URI customUri = URI.create("http://s3-test.com"); - Context.ModifyHttpRequest ctx = context(request, sdkHttpRequest(customUri)); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - S3Configuration s3Configuration = S3Configuration.builder().accelerateModeEnabled(true).build(); - - executionAttributes.putAttribute(SERVICE_CONFIG, s3Configuration); - executionAttributes.putAttribute(AWS_REGION, Region.US_EAST_1); - - SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(ctx, executionAttributes); - - assertThat(sdkHttpFullRequest.getUri()).isEqualTo(customUri); - } + private Context.ModifyHttpRequest createContext(String accessPointArn) { + URI customUri = URI.create(String.format("http://s3-test.com/%s/%s", urlEncode(accessPointArn), KEY)); + PutObjectRequest request = PutObjectRequest.builder().bucket(accessPointArn).key(KEY).build(); - private void verifyEndpoint(String protocol, String expectedEndpoint, - S3Configuration.Builder builder) { - String bucket = "test-bucket"; - String key = "test-key"; - URI customUri = URI.create(String.format("%s://s3-test.com/%s/%s", protocol, bucket, key)); - URI expectedUri = URI.create(String.format("%s/%s/%s", expectedEndpoint, bucket, key)); - Context.ModifyHttpRequest ctx = context(PutObjectRequest.builder().build(), sdkHttpRequest(customUri)); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - S3Configuration s3Configuration = builder.build(); - - executionAttributes.putAttribute(SERVICE_CONFIG, s3Configuration); - executionAttributes.putAttribute(AWS_REGION, Region.US_EAST_1); - - SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(ctx, executionAttributes); - - assertThat(sdkHttpFullRequest.getUri()).isEqualTo(expectedUri); + return InterceptorTestUtils.modifyHttpRequestContext(request, InterceptorTestUtils.sdkHttpRequest(customUri)); } - private void verifyAccesspointArn(String protocol, String accessPointArn, String expectedEndpoint, - Region expectedSigningRegion, - S3Configuration.Builder builder, Region region) { - String key = "test-key"; - - URI customUri = URI.create(String.format("%s://s3-test.com/%s/%s", protocol, urlEncode(accessPointArn), key)); - URI expectedUri = URI.create(String.format("%s/%s", expectedEndpoint, key)); - PutObjectRequest putObjectRequest = PutObjectRequest.builder() - .bucket(accessPointArn) - .key(key) - .build(); - Context.ModifyHttpRequest ctx = context(putObjectRequest, sdkHttpRequest(customUri)); + private ExecutionAttributes createExecutionAttributes(S3Configuration.Builder builder, Region region) { ExecutionAttributes executionAttributes = new ExecutionAttributes(); - S3Configuration s3Configuration = builder.build(); - - executionAttributes.putAttribute(SERVICE_CONFIG, s3Configuration); + executionAttributes.putAttribute(SERVICE_CONFIG, builder.build()); 
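        // Seed the signing attributes with client-level defaults; the interceptor under test is expected
        // to overwrite SIGNING_REGION and SERVICE_SIGNING_NAME when it resolves an outposts ARN.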
executionAttributes.putAttribute(AWS_REGION, region); executionAttributes.putAttribute(SIGNING_REGION, region); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - SdkHttpRequest sdkHttpFullRequest = interceptor.modifyHttpRequest(ctx, executionAttributes); - - assertThat(executionAttributes.getAttribute(SIGNING_REGION)) - .isEqualTo(expectedSigningRegion); - assertThat(sdkHttpFullRequest.getUri()).isEqualTo(expectedUri); - - String expectedSigningName; - if (accessPointArn.contains(":s3-outposts")) { - expectedSigningName = "s3-outposts"; - } else { - expectedSigningName = "s3"; - } - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)) - .isEqualTo(expectedSigningName); - } - - - private void verifyAccesspointArn(String protocol, String accessPointArn, String expectedEndpoint, - S3Configuration.Builder builder) { - verifyAccesspointArn(protocol, accessPointArn, expectedEndpoint, Region.US_EAST_1, builder, Region.US_EAST_1); + executionAttributes.putAttribute(SERVICE_SIGNING_NAME, DEFAULT_SIGNING_NAME); + return executionAttributes; } - private Context.ModifyHttpRequest context(SdkRequest request, SdkHttpRequest sdkHttpRequest) { - return new Context.ModifyHttpRequest() { - @Override - public SdkHttpRequest httpRequest() { - return sdkHttpRequest; - } - - @Override - public Optional requestBody() { - return null; - } - - @Override - public Optional asyncRequestBody() { - return null; - } - - @Override - public SdkRequest request() { - return request; - } - }; + private URI uri(String expectedEndpoint) { + return URI.create(String.format("%s/%s", expectedEndpoint, KEY)); } } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java index 06c7efa12347..83e462042437 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/utils/InterceptorTestUtils.java @@ -37,6 +37,23 @@ public final class InterceptorTestUtils { private InterceptorTestUtils() { } + public static SdkHttpFullRequest sdkHttpFullRequest() { + return SdkHttpFullRequest.builder() + .uri(URI.create("http://localhost:8080")) + .method(SdkHttpMethod.GET) + .build(); + } + + public static SdkHttpRequest sdkHttpRequest(URI customUri) { + return SdkHttpFullRequest.builder() + .protocol(customUri.getScheme()) + .host(customUri.getHost()) + .port(customUri.getPort()) + .method(SdkHttpMethod.GET) + .encodedPath(customUri.getPath()) + .build(); + } + public static Context.ModifyHttpResponse modifyHttpResponse(SdkRequest request, SdkHttpResponse sdkHttpResponse) { Publisher publisher = new EmptyPublisher<>(); @@ -116,13 +133,6 @@ public SdkRequest request() { }; } - public static SdkHttpFullRequest sdkHttpFullRequest() { - return SdkHttpFullRequest.builder() - .uri(URI.create("http://localhost:8080")) - .method(SdkHttpMethod.GET) - .build(); - } - public static Context.ModifyResponse modifyResponseContext(SdkRequest request, SdkResponse response, SdkHttpResponse sdkHttpResponse) { return new Context.ModifyResponse() { @Override diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index bf081f0508fe..f1b9962db25b 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git 
a/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java new file mode 100644 index 000000000000..48f537bece32 --- /dev/null +++ b/services/s3control/src/it/java/software.amazon.awssdk.services.s3control/S3AsyncAccessPointsIntegrationTest.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package software.amazon.awssdk.services.s3control; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertNotNull; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.util.StringJoiner; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.sts.StsClient; + +public class S3AsyncAccessPointsIntegrationTest extends S3ControlIntegrationTestBase { + + private static final String BUCKET = temporaryBucketName(S3AsyncAccessPointsIntegrationTest.class); + + private static final String AP_NAME = "java-sdk-" + System.currentTimeMillis(); + + private static final String KEY = "some-key"; + + private static S3ControlAsyncClient s3control; + + private static StsClient sts; + + private static String accountId; + + @BeforeClass + public static void setupFixture() { + createBucket(BUCKET); + + s3control = S3ControlAsyncClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + sts = StsClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + + accountId = sts.getCallerIdentity().account(); + s3control.createAccessPoint(r -> r.accountId(accountId) + .bucket(BUCKET) + .name(AP_NAME)) + .join(); + } + + @AfterClass + public static void tearDown() { + deleteBucketAndAllContents(BUCKET); + s3control.deleteAccessPoint(b -> b.accountId(accountId).name(AP_NAME)).join(); + } + + @Test + public void accessPointOperation_nonArns() { + assertNotNull(s3control.listAccessPoints(b -> b.bucket(BUCKET).accountId(accountId).maxResults(1)).join()); + assertNotNull(s3control.getAccessPoint(b -> b.name(AP_NAME).accountId(accountId)).join()); + } + + @Test + public void transfer_Succeeds_UsingAccessPoint() { + StringJoiner apArn = new StringJoiner(":"); + apArn.add("arn").add("aws").add("s3").add("us-west-2").add(accountId).add("accesspoint").add(AP_NAME); + + s3.putObject(PutObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + .build(), RequestBody.fromString("helloworld")); + + String objectContent = s3.getObjectAsBytes(GetObjectRequest.builder() + .bucket(apArn.toString()) + .key(KEY) + 
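                                              // read the object back through the same access point ARN to confirm the round trip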
.build()).asUtf8String(); + + assertThat(objectContent).isEqualTo("helloworld"); + } +} diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/DisableDoubleUrlEncodingForSigningInterceptor.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/DisableDoubleUrlEncodingForSigningInterceptor.java new file mode 100644 index 000000000000..01c8cbcbbc38 --- /dev/null +++ b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/DisableDoubleUrlEncodingForSigningInterceptor.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal.interceptors; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; + +/** + * Execution interceptor which modifies the HTTP request to S3 Control to + * add a signer attribute that will instruct the signer to not double-url-encode path elements. + * S3 Control expects path elements to be encoded only once in the canonical URI. + * Similar functionality exists for S3. + */ +@SdkInternalApi +public final class DisableDoubleUrlEncodingForSigningInterceptor implements ExecutionInterceptor { + + @Override + public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + executionAttributes.putAttribute(AwsSignerExecutionAttribute.SIGNER_DOUBLE_URL_ENCODE, Boolean.FALSE); + } +} diff --git a/services/s3control/src/main/resources/codegen-resources/service-2.json b/services/s3control/src/main/resources/codegen-resources/service-2.json index f628e22e7d51..721dc2a3c630 100644 --- a/services/s3control/src/main/resources/codegen-resources/service-2.json +++ b/services/s3control/src/main/resources/codegen-resources/service-2.json @@ -23,7 +23,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessPointResult"}, - "documentation":"

Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

Using this action with Amazon S3 on Outposts

This action:

  • Requires a virtual private cloud (VPC) configuration as S3 on Outposts only supports VPC style access points.

  • Does not support ACL on S3 on Outposts buckets.

  • Does not support Public Access on S3 on Outposts buckets.

  • Does not support object lock for S3 on Outposts buckets.

For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide .

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to CreateAccessPoint:

", + "documentation":"

Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

Using this action with Amazon S3 on Outposts

This action:

  • Requires a virtual private cloud (VPC) configuration as S3 on Outposts only supports VPC style access points.

  • Does not support ACL on S3 on Outposts buckets.

  • Does not support Public Access on S3 on Outposts buckets.

  • Does not support object lock for S3 on Outposts buckets.

For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to CreateAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -40,7 +40,7 @@ {"shape":"BucketAlreadyExists"}, {"shape":"BucketAlreadyOwnedByYou"} ], - "documentation":"

This API operation creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.

Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

S3 on Outposts buckets do not support

  • ACLs. Instead, configure access point policies to manage access to buckets.

  • Public access.

  • Object Lock

  • Bucket Location constraint

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and outpost-id in your API request, see the Example section below.

The following actions are related to CreateBucket for Amazon S3 on Outposts:

", + "documentation":"

This API operation creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.

Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

S3 on Outposts buckets do not support

  • ACLs. Instead, configure access point policies to manage access to buckets.

  • Public access.

  • Object Lock

  • Bucket Location constraint

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your API request, see the Examples section.

The following actions are related to CreateBucket for Amazon S3 on Outposts:

", "httpChecksumRequired":true }, "CreateJob":{ @@ -61,7 +61,7 @@ {"shape":"IdempotencyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

S3 Batch Operations performs large-scale Batch Operations on Amazon S3 objects. Batch Operations can run a single operation or action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

This operation creates a S3 Batch Operations job.

Related actions include:

", + "documentation":"

S3 Batch Operations performs large-scale Batch Operations on Amazon S3 objects. Batch Operations can run a single operation or action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

This operation creates an S3 Batch Operations job.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -73,7 +73,7 @@ "requestUri":"/v20180820/accesspoint/{name}" }, "input":{"shape":"DeleteAccessPointRequest"}, - "documentation":"

Deletes the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the ARN, see the Example section below.

The following actions are related to DeleteAccessPoint:

", + "documentation":"

Deletes the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -85,7 +85,7 @@ "requestUri":"/v20180820/accesspoint/{name}/policy" }, "input":{"shape":"DeleteAccessPointPolicyRequest"}, - "documentation":"

Deletes the access point policy for the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to DeleteAccessPointPolicy:

", + "documentation":"

Deletes the access point policy for the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteAccessPointPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -97,7 +97,7 @@ "requestUri":"/v20180820/bucket/{name}" }, "input":{"shape":"DeleteBucketRequest"}, - "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.

Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

Related Resources

", + "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.

Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

Related Resources

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -109,7 +109,7 @@ "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" }, "input":{"shape":"DeleteBucketLifecycleConfigurationRequest"}, - "documentation":"

This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", + "documentation":"

This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -121,7 +121,7 @@ "requestUri":"/v20180820/bucket/{name}/policy" }, "input":{"shape":"DeleteBucketPolicyRequest"}, - "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.

This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this operation. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to DeleteBucketPolicy:

", + "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.

This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this operation. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -134,7 +134,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketTaggingRequest"}, - "documentation":"

This API operation deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to DeleteBucketTagging:

", + "documentation":"

This operation deletes an Amazon S3 on Outposts bucket's tags. To delete S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -169,6 +169,31 @@ "hostPrefix":"{AccountId}." } }, + "DeleteStorageLensConfiguration":{ + "name":"DeleteStorageLensConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/storagelens/{storagelensid}" + }, + "input":{"shape":"DeleteStorageLensConfigurationRequest"}, + "documentation":"

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "DeleteStorageLensConfigurationTagging":{ + "name":"DeleteStorageLensConfigurationTagging", + "http":{ + "method":"DELETE", + "requestUri":"/v20180820/storagelens/{storagelensid}/tagging" + }, + "input":{"shape":"DeleteStorageLensConfigurationTaggingRequest"}, + "output":{"shape":"DeleteStorageLensConfigurationTaggingResult"}, + "documentation":"

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "DescribeJob":{ "name":"DescribeJob", "http":{ @@ -196,7 +221,7 @@ }, "input":{"shape":"GetAccessPointRequest"}, "output":{"shape":"GetAccessPointResult"}, - "documentation":"

Returns configuration information about the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to GetAccessPoint:

", + "documentation":"

Returns configuration information about the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -235,7 +260,7 @@ }, "input":{"shape":"GetBucketRequest"}, "output":{"shape":"GetBucketResult"}, - "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

The following actions are related to GetBucket for Amazon S3 on Outposts:

", + "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. Only users from the Outposts bucket owner's account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
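
As an illustrative sketch only (not part of this service model), a GetBucket call through the S3Control client that the AWS SDK for Java v2 generates from these shapes could look as follows; the Region, account ID, and Outposts bucket ARN are placeholder values:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.GetBucketRequest;
import software.amazon.awssdk.services.s3control.model.GetBucketResponse;

public class GetOutpostsBucketSketch {
    public static void main(String[] args) {
        // The account ID satisfies the {AccountId} host prefix declared for this operation.
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        GetBucketResponse bucket = s3Control.getBucket(GetBucketRequest.builder()
                .accountId("123456789012")                                                              // placeholder account
                .bucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports") // placeholder ARN
                .build());

        System.out.println(bucket.bucket() + " created " + bucket.creationDate());
    }
}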

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -248,7 +273,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationResult"}, - "documentation":"

This API operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

GetBucketLifecycleConfiguration has the following special error:

  • Error code: NoSuchLifecycleConfiguration

    • Description: The lifecycle configuration does not exist.

    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code Prefix: Client

The following actions are related to GetBucketLifecycleConfiguration:

", + "documentation":"

This operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts; for information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

GetBucketLifecycleConfiguration has the following special error:

  • Error code: NoSuchLifecycleConfiguration

    • Description: The lifecycle configuration does not exist.

    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code Prefix: Client

The following actions are related to GetBucketLifecycleConfiguration:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -261,7 +286,7 @@ }, "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyResult"}, - "documentation":"

This API action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have s3outposts:GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to GetBucketPolicy:

", + "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

Only users from the Outposts bucket owner's account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
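
A hedged sketch of retrieving the policy with the S3Control client generated by the AWS SDK for Java v2; the account ID and Outposts bucket ARN are placeholders, not values from this model:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.GetBucketPolicyRequest;

public class GetOutpostsBucketPolicySketch {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        // Prints the bucket policy document; a missing s3-outposts:GetBucketPolicy
        // permission surfaces as a 403 Access Denied error, as described above.
        String policy = s3Control.getBucketPolicy(GetBucketPolicyRequest.builder()
                .accountId("123456789012")                                                              // placeholder account
                .bucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports") // placeholder ARN
                .build())
                .policy();
        System.out.println(policy);
    }
}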

The following actions are related to GetBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -274,7 +299,7 @@ }, "input":{"shape":"GetBucketTaggingRequest"}, "output":{"shape":"GetBucketTaggingResult"}, - "documentation":"

This API operation gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see GetBucketTagging in the Amazon Simple Storage Service API.

Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

GetBucketTagging has the following special error:

  • Error code: NoSuchTagSetError

    • Description: There is no tag set associated with the bucket.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to GetBucketTagging:

", + "documentation":"

This operation gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket's tags, see GetBucketTagging in the Amazon Simple Storage Service API.

Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permission to perform the GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

GetBucketTagging has the following special error:

  • Error code: NoSuchTagSetError

    • Description: There is no tag set associated with the bucket.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to GetBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -313,6 +338,32 @@ "hostPrefix":"{AccountId}." } }, + "GetStorageLensConfiguration":{ + "name":"GetStorageLensConfiguration", + "http":{ + "method":"GET", + "requestUri":"/v20180820/storagelens/{storagelensid}" + }, + "input":{"shape":"GetStorageLensConfigurationRequest"}, + "output":{"shape":"GetStorageLensConfigurationResult"}, + "documentation":"

Gets the Amazon S3 Storage Lens configuration. For more information, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
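
A brief sketch of reading a configuration through the AWS SDK for Java v2 client generated from the shapes added in this diff; the account ID and configuration ID are placeholders:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.GetStorageLensConfigurationRequest;
import software.amazon.awssdk.services.s3control.model.StorageLensConfiguration;

public class GetStorageLensConfigurationSketch {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        // Fetch the configuration identified by the {storagelensid} path parameter.
        StorageLensConfiguration config = s3Control.getStorageLensConfiguration(
                GetStorageLensConfigurationRequest.builder()
                        .accountId("123456789012")          // placeholder account
                        .configId("my-storage-lens-config") // placeholder configuration ID
                        .build())
                .storageLensConfiguration();

        System.out.println(config.id() + " enabled=" + config.isEnabled());
    }
}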

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "GetStorageLensConfigurationTagging":{ + "name":"GetStorageLensConfigurationTagging", + "http":{ + "method":"GET", + "requestUri":"/v20180820/storagelens/{storagelensid}/tagging" + }, + "input":{"shape":"GetStorageLensConfigurationTaggingRequest"}, + "output":{"shape":"GetStorageLensConfigurationTaggingResult"}, + "documentation":"

Gets the tags of the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "ListAccessPoints":{ "name":"ListAccessPoints", "http":{ @@ -321,7 +372,7 @@ }, "input":{"shape":"ListAccessPointsRequest"}, "output":{"shape":"ListAccessPointsResult"}, - "documentation":"

Returns a list of the access points currently associated with the specified bucket. You can retrieve up to 1000 access points per call. If the specified bucket has more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to ListAccessPoints:

", + "documentation":"

Returns a list of the access points currently associated with the specified bucket. You can retrieve up to 1,000 access points per call. If the specified bucket has more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
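
A minimal sketch, assuming the generated AWS SDK for Java v2 client, of paging through access points with the continuation token described above; the account ID and bucket name are placeholders:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.ListAccessPointsRequest;
import software.amazon.awssdk.services.s3control.model.ListAccessPointsResponse;

public class ListAccessPointsSketch {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        String nextToken = null;
        do {
            // Each page returns up to maxResults access points plus an optional continuation token.
            ListAccessPointsResponse page = s3Control.listAccessPoints(ListAccessPointsRequest.builder()
                    .accountId("123456789012")   // placeholder account
                    .bucket("example-bucket")    // placeholder bucket name
                    .maxResults(100)
                    .nextToken(nextToken)
                    .build());
            page.accessPointList().forEach(ap -> System.out.println(ap.name() + " -> " + ap.bucket()));
            nextToken = page.nextToken();
        } while (nextToken != null && !nextToken.isEmpty());
    }
}

The loop ends once a response carries no further continuation token.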

The following actions are related to ListAccessPoints:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -352,7 +403,20 @@ }, "input":{"shape":"ListRegionalBucketsRequest"}, "output":{"shape":"ListRegionalBucketsResult"}, - "documentation":"

Returns a list of all Outposts buckets in an Outposts that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and outpost-id in your API request, see the Example section below.

", + "documentation":"

Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "ListStorageLensConfigurations":{ + "name":"ListStorageLensConfigurations", + "http":{ + "method":"GET", + "requestUri":"/v20180820/storagelens" + }, + "input":{"shape":"ListStorageLensConfigurationsRequest"}, + "output":{"shape":"ListStorageLensConfigurationsResult"}, + "documentation":"

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -368,7 +432,7 @@ "locationName":"PutAccessPointPolicyRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to PutAccessPointPolicy:

", + "documentation":"

Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutAccessPointPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -380,7 +444,7 @@ "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"

This API action puts a lifecycle configuration to an Amazon S3 on Outposts bucket. To put a lifecycle configuration to an S3 bucket, see PutBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Creates a new lifecycle configuration for the Outposts bucket or replaces an existing lifecycle configuration. Outposts buckets can only support a lifecycle that deletes objects after a certain period of time. For more information, see Managing Lifecycle Permissions for Amazon S3 on Outposts.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to PutBucketLifecycleConfiguration:

", + "documentation":"

This action puts a lifecycle configuration to an Amazon S3 on Outposts bucket. To put a lifecycle configuration to an S3 bucket, see PutBucketLifecycleConfiguration in the Amazon Simple Storage Service API.

Creates a new lifecycle configuration for the Outposts bucket or replaces an existing lifecycle configuration. Outposts buckets only support lifecycle configurations that delete/expire objects after a certain period of time and abort incomplete multipart uploads. For more information, see Managing Lifecycle Permissions for Amazon S3 on Outposts.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
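
A sketch, under the assumption that the AWS SDK for Java v2 generates LifecycleRule and LifecycleExpiration builders from these shapes, of putting an expire-after-30-days rule on an Outposts bucket; the account ID and bucket ARN are placeholders:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.ExpirationStatus;
import software.amazon.awssdk.services.s3control.model.LifecycleConfiguration;
import software.amazon.awssdk.services.s3control.model.LifecycleExpiration;
import software.amazon.awssdk.services.s3control.model.LifecycleRule;
import software.amazon.awssdk.services.s3control.model.PutBucketLifecycleConfigurationRequest;

public class PutOutpostsLifecycleSketch {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        // A single rule that expires objects after 30 days, matching the kind of
        // delete/expire-only lifecycle that Outposts buckets support.
        LifecycleRule rule = LifecycleRule.builder()
                .id("expire-after-30-days")
                .status(ExpirationStatus.ENABLED)
                .expiration(LifecycleExpiration.builder().days(30).build())
                .build();

        s3Control.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
                .accountId("123456789012")                                                              // placeholder account
                .bucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports") // placeholder ARN
                .lifecycleConfiguration(LifecycleConfiguration.builder().rules(rule).build())
                .build());
    }
}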

The following actions are related to PutBucketLifecycleConfiguration:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -397,7 +461,7 @@ "locationName":"PutBucketPolicyRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

This API action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this operation.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to PutBucketPolicy:

", + "documentation":"

This action puts a bucket policy to an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.

Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this operation.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.

The following actions are related to PutBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -410,7 +474,7 @@ "requestUri":"/v20180820/bucket/{name}/tagging" }, "input":{"shape":"PutBucketTaggingRequest"}, - "documentation":"

This API action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.

Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

  • Error code: InvalidTagError

  • Error code: MalformedXMLError

    • Description: The XML provided does not match the schema.

  • Error code: OperationAbortedError

    • Description: A conflicting conditional operation is currently in progress against this resource. Try again.

  • Error code: InternalError

    • Description: The service was unable to apply the provided tag to the bucket.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived using the access point ARN, see the Example section below.

The following actions are related to PutBucketTagging:

", + "documentation":"

This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.

Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3-outposts:PutBucketTagging action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

  • Error code: InvalidTagError

  • Error code: MalformedXMLError

    • Description: The XML provided does not match the schema.

  • Error code: OperationAbortedError

    • Description: A conflicting conditional operation is currently in progress against this resource. Try again.

  • Error code: InternalError

    • Description: The service was unable to apply the provided tag to the bucket.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived using the access point ARN, see the Examples section.
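
A hedged sketch of tagging an Outposts bucket through the generated AWS SDK for Java v2 client; the tag key/value, account ID, and bucket ARN are placeholders:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.PutBucketTaggingRequest;
import software.amazon.awssdk.services.s3control.model.S3Tag;
import software.amazon.awssdk.services.s3control.model.Tagging;

public class PutOutpostsBucketTaggingSketch {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        // Replaces the bucket's tag set with a single cost-allocation tag.
        s3Control.putBucketTagging(PutBucketTaggingRequest.builder()
                .accountId("123456789012")                                                              // placeholder account
                .bucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports") // placeholder ARN
                .tagging(Tagging.builder()
                        .tagSet(S3Tag.builder().key("project").value("reports").build())
                        .build())
                .build());
    }
}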

The following actions are related to PutBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -434,7 +498,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this API action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

  • If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.

  • For deleting existing tags for your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.

  • A few things to consider about using tags:

    • Amazon S3 limits the maximum number of tags to 50 tags per job.

    • You can associate up to 50 tags with a job as long as they have unique tag keys.

    • A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.

    • The key and values are case sensitive.

    • For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.

To use this operation, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

", + "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace it entirely, or retrieve the existing tag set using GetJobTagging, modify it, and use this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.

  • If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.

  • For deleting existing tags for your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.

  • A few things to consider about using tags:

    • Amazon S3 limits the maximum number of tags to 50 tags per job.

    • You can associate up to 50 tags with a job as long as they have unique tag keys.

    • A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.

    • The key and values are case sensitive.

    • For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.

To use this operation, you must have permission to perform the s3:PutJobTagging action.
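
A minimal sketch of replacing a Batch Operations job's tag set via the generated AWS SDK for Java v2 client; the job ID, tag key/value, and account ID are placeholders:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.PutJobTaggingRequest;
import software.amazon.awssdk.services.s3control.model.S3Tag;

public class PutJobTaggingSketch {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        // Replaces the job's existing tag set with the tags supplied here.
        s3Control.putJobTagging(PutJobTaggingRequest.builder()
                .accountId("123456789012")                     // placeholder account
                .jobId("00000000-0000-0000-0000-000000000000") // placeholder Batch Operations job ID
                .tags(S3Tag.builder().key("department").value("finance").build())
                .build());
    }
}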

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." } @@ -451,6 +515,39 @@ "hostPrefix":"{AccountId}." } }, + "PutStorageLensConfiguration":{ + "name":"PutStorageLensConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/storagelens/{storagelensid}" + }, + "input":{ + "shape":"PutStorageLensConfigurationRequest", + "locationName":"PutStorageLensConfigurationRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "documentation":"

Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
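
A sketch, assuming the AWS SDK for Java v2 generates StorageLensConfiguration, AccountLevel, BucketLevel, and ActivityMetrics builders from the shapes added in this diff; the configuration ID and account ID are placeholders:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.AccountLevel;
import software.amazon.awssdk.services.s3control.model.ActivityMetrics;
import software.amazon.awssdk.services.s3control.model.BucketLevel;
import software.amazon.awssdk.services.s3control.model.PutStorageLensConfigurationRequest;
import software.amazon.awssdk.services.s3control.model.StorageLensConfiguration;

public class PutStorageLensConfigurationSketch {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.builder().region(Region.US_WEST_2).build();

        // An account-level configuration with activity metrics enabled at both
        // the account level and the (required) bucket level.
        StorageLensConfiguration config = StorageLensConfiguration.builder()
                .id("my-storage-lens-config")  // placeholder configuration ID
                .isEnabled(true)
                .accountLevel(AccountLevel.builder()
                        .activityMetrics(ActivityMetrics.builder().isEnabled(true).build())
                        .bucketLevel(BucketLevel.builder()
                                .activityMetrics(ActivityMetrics.builder().isEnabled(true).build())
                                .build())
                        .build())
                .build();

        s3Control.putStorageLensConfiguration(PutStorageLensConfigurationRequest.builder()
                .accountId("123456789012")     // placeholder account
                .configId("my-storage-lens-config")
                .storageLensConfiguration(config)
                .build());
    }
}

Note that AccountLevel requires a BucketLevel member, mirroring the required list on the AccountLevel shape below.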

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, + "PutStorageLensConfigurationTagging":{ + "name":"PutStorageLensConfigurationTagging", + "http":{ + "method":"PUT", + "requestUri":"/v20180820/storagelens/{storagelensid}/tagging" + }, + "input":{ + "shape":"PutStorageLensConfigurationTaggingRequest", + "locationName":"PutStorageLensConfigurationTaggingRequest", + "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} + }, + "output":{"shape":"PutStorageLensConfigurationTaggingResult"}, + "documentation":"

Puts or replaces tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.

", + "endpoint":{ + "hostPrefix":"{AccountId}." + } + }, "UpdateJobPriority":{ "name":"UpdateJobPriority", "http":{ @@ -550,6 +647,37 @@ "max":64, "pattern":"^\\d{12}$" }, + "AccountLevel":{ + "type":"structure", + "required":["BucketLevel"], + "members":{ + "ActivityMetrics":{ + "shape":"ActivityMetrics", + "documentation":"

A container for the S3 Storage Lens activity metrics.

" + }, + "BucketLevel":{ + "shape":"BucketLevel", + "documentation":"

A container for the S3 Storage Lens bucket-level configuration.

" + } + }, + "documentation":"

A container for the account-level Amazon S3 Storage Lens configuration.

" + }, + "ActivityMetrics":{ + "type":"structure", + "members":{ + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

A container for whether the activity metrics are enabled.

" + } + }, + "documentation":"

A container for the activity metrics.

" + }, + "AwsOrgArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:[a-z\\-]+:organizations::\\d{12}:organization\\/o-[a-z0-9]{10,32}" + }, "BadRequestException":{ "type":"structure", "members":{ @@ -582,6 +710,20 @@ "authenticated-read" ] }, + "BucketLevel":{ + "type":"structure", + "members":{ + "ActivityMetrics":{ + "shape":"ActivityMetrics", + "documentation":"

A container for the bucket-level activity metrics for Amazon S3 Storage Lens.

" + }, + "PrefixLevel":{ + "shape":"PrefixLevel", + "documentation":"

A container for the bucket-level prefix-level metrics for S3 Storage Lens.

" + } + }, + "documentation":"

A container for the bucket-level configuration.

" + }, "BucketLocationConstraint":{ "type":"string", "enum":[ @@ -603,8 +745,22 @@ "max":255, "min":3 }, + "Buckets":{ + "type":"list", + "member":{ + "shape":"S3BucketArnString", + "locationName":"Arn" + } + }, + "ConfigId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9\\-\\_\\.]+" + }, "ConfirmRemoveSelfBucketAccess":{"type":"boolean"}, "ConfirmationRequired":{"type":"boolean"}, + "ContinuationToken":{"type":"string"}, "CreateAccessPointRequest":{ "type":"structure", "required":[ @@ -628,7 +784,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket that you want to associate this access point with.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" + "documentation":"

The name of the bucket that you want to associate this access point with.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" }, "VpcConfiguration":{ "shape":"VpcConfiguration", @@ -642,7 +798,7 @@ "members":{ "AccessPointArn":{ "shape":"S3AccessPointArn", - "documentation":"

The ARN of the access point.

" + "documentation":"

The ARN of the access point.

This is only supported by Amazon S3 on Outposts.

" } } }, @@ -734,7 +890,7 @@ }, "BucketArn":{ "shape":"S3RegionalBucketArn", - "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" + "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

" } } }, @@ -827,7 +983,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point whose policy you want to delete.

For Amazon S3 on Outposts specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point whose policy you want to delete.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -849,7 +1005,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point you want to delete.

For Amazon S3 on Outposts specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point you want to delete.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -871,7 +1027,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket ARN of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -893,7 +1049,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The ARN of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -915,7 +1071,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

Specifies the bucket being deleted.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket being deleted.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -937,7 +1093,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The bucket ARN that has the tag set to be removed.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The bucket ARN that has the tag set to be removed.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -983,6 +1139,55 @@ } } }, + "DeleteStorageLensConfigurationRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

The ID of the S3 Storage Lens configuration.

", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the requester.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "DeleteStorageLensConfigurationTaggingRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

The ID of the S3 Storage Lens configuration.

", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the requester.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "DeleteStorageLensConfigurationTaggingResult":{ + "type":"structure", + "members":{ + } + }, "DescribeJobRequest":{ "type":"structure", "required":[ @@ -1019,6 +1224,20 @@ "max":1024, "min":1 }, + "Exclude":{ + "type":"structure", + "members":{ + "Buckets":{ + "shape":"Buckets", + "documentation":"

A container for the S3 Storage Lens bucket excludes.

" + }, + "Regions":{ + "shape":"Regions", + "documentation":"

A container for the S3 Storage Lens Region excludes.

" + } + }, + "documentation":"

A container for what Amazon S3 Storage Lens will exclude.

" + }, "ExpirationStatus":{ "type":"string", "enum":[ @@ -1027,6 +1246,13 @@ ] }, "ExpiredObjectDeleteMarker":{"type":"boolean"}, + "Format":{ + "type":"string", + "enum":[ + "CSV", + "Parquet" + ] + }, "FunctionArnString":{ "type":"string", "max":1024, @@ -1049,7 +1275,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point whose policy you want to retrieve.

For Amazon S3 on Outposts specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point whose policy you want to retrieve.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1111,7 +1337,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point whose configuration information you want to retrieve.

For Amazon S3 on Outposts specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point whose configuration information you want to retrieve.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1159,7 +1385,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1190,7 +1416,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The ARN of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1221,7 +1447,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The ARN of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

For using this parameter with Amazon S3 on Outposts with the REST API, you must specify the name and the x-amz-outpost-id as well.

For using this parameter with S3 on Outposts with the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1260,7 +1486,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The ARN of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

When using this parameter with Amazon S3 on Outposts through the REST API, you must also specify the name and the x-amz-outpost-id.

When using this parameter with S3 on Outposts through the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" } @@ -1330,6 +1556,69 @@ } } }, + "GetStorageLensConfigurationRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

The ID of the Amazon S3 Storage Lens configuration.

", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the requester.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "GetStorageLensConfigurationResult":{ + "type":"structure", + "members":{ + "StorageLensConfiguration":{ + "shape":"StorageLensConfiguration", + "documentation":"

The S3 Storage Lens configuration requested.
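A minimal sketch (AWS SDK for Java v2) of retrieving a configuration, assuming a dashboard named my-dashboard already exists for the account; the IDs are placeholders:

import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.StorageLensConfiguration;

public class GetStorageLensConfigurationExample {
    public static void main(String[] args) {
        // Region and credentials are resolved from the environment.
        S3ControlClient s3Control = S3ControlClient.create();

        // Both the configuration ID and the account ID are required.
        StorageLensConfiguration config =
                s3Control.getStorageLensConfiguration(b -> b.configId("my-dashboard")
                                                            .accountId("123456789012"))
                         .storageLensConfiguration();

        System.out.println("Enabled: " + config.isEnabled());
    }
}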

" + } + }, + "payload":"StorageLensConfiguration" + }, + "GetStorageLensConfigurationTaggingRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

The ID of the Amazon S3 Storage Lens configuration.

", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the requester.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + } + } + }, + "GetStorageLensConfigurationTaggingResult":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"StorageLensTags", + "documentation":"

The tags of the S3 Storage Lens configuration requested.

" + } + } + }, "GrantFullControl":{"type":"string"}, "GrantRead":{"type":"string"}, "GrantReadACP":{"type":"string"}, @@ -1350,6 +1639,20 @@ "documentation":"

", "exception":true }, + "Include":{ + "type":"structure", + "members":{ + "Buckets":{ + "shape":"Buckets", + "documentation":"

A container for the S3 Storage Lens bucket includes.

" + }, + "Regions":{ + "shape":"Regions", + "documentation":"

A container for the S3 Storage Lens Region includes.

" + } + }, + "documentation":"

A container for what Amazon S3 Storage Lens configuration includes.

" + }, "InternalServiceException":{ "type":"structure", "members":{ @@ -1375,6 +1678,7 @@ "documentation":"

", "exception":true }, + "IsEnabled":{"type":"boolean"}, "IsPublic":{"type":"boolean"}, "JobArn":{ "type":"string", @@ -1909,7 +2213,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The name of the bucket whose associated access points you want to list.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The name of the bucket whose associated access points you want to list.

When using this parameter with Amazon S3 on Outposts through the REST API, you must also specify the name and the x-amz-outpost-id.

When using this parameter with S3 on Outposts through the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"querystring", "locationName":"bucket" }, @@ -2029,6 +2333,65 @@ } } }, + "ListStorageLensConfigurationEntry":{ + "type":"structure", + "required":[ + "Id", + "StorageLensArn", + "HomeRegion" + ], + "members":{ + "Id":{ + "shape":"ConfigId", + "documentation":"

A container for the S3 Storage Lens configuration ID.

" + }, + "StorageLensArn":{ + "shape":"StorageLensArn", + "documentation":"

The ARN of the S3 Storage Lens configuration. This property is read-only.

" + }, + "HomeRegion":{ + "shape":"S3AWSRegion", + "documentation":"

A container for the S3 Storage Lens home Region. Your metrics data is stored and retained in your designated S3 Storage Lens home Region.

" + }, + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

A container for whether the S3 Storage Lens configuration is enabled. This property is required.

" + } + }, + "documentation":"

Part of ListStorageLensConfigurationResult. Each entry includes the description of the S3 Storage Lens configuration, its home Region, whether it is enabled, its Amazon Resource Name (ARN), and its configuration ID.

" + }, + "ListStorageLensConfigurationsRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the requester.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "NextToken":{ + "shape":"ContinuationToken", + "documentation":"

A pagination token to request the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListStorageLensConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"ContinuationToken", + "documentation":"

If the request produced more than the maximum number of S3 Storage Lens configuration results, you can pass this value into a subsequent request to retrieve the next page of results.

" + }, + "StorageLensConfigurationList":{ + "shape":"StorageLensConfigurationList", + "documentation":"

A list of S3 Storage Lens configurations.
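A sketch of walking the paginated results with NextToken; a manual loop is shown to avoid assuming a generated paginator, and the account ID is a placeholder:

import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.ListStorageLensConfigurationEntry;
import software.amazon.awssdk.services.s3control.model.ListStorageLensConfigurationsResponse;

public class ListStorageLensConfigurationsExample {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.create();
        String nextToken = null;

        do {
            String token = nextToken; // effectively final copy for the lambda
            ListStorageLensConfigurationsResponse page =
                    s3Control.listStorageLensConfigurations(b -> b.accountId("123456789012")
                                                                  .nextToken(token));

            for (ListStorageLensConfigurationEntry entry : page.storageLensConfigurationList()) {
                System.out.println(entry.id() + " enabled=" + entry.isEnabled());
            }
            nextToken = page.nextToken();
        } while (nextToken != null);
    }
}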

" + } + } + }, "Location":{"type":"string"}, "MaxLength1024String":{ "type":"string", @@ -2039,6 +2402,11 @@ "max":1000, "min":0 }, + "MinStorageBytesPercentage":{ + "type":"double", + "max":100, + "min":0.1 + }, "NetworkOrigin":{ "type":"string", "enum":[ @@ -2128,6 +2496,10 @@ "S3PutObjectRetention" ] }, + "OutputSchemaVersion":{ + "type":"string", + "enum":["V_1"] + }, "Policy":{"type":"string"}, "PolicyStatus":{ "type":"structure", @@ -2141,6 +2513,28 @@ "documentation":"

Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

" }, "Prefix":{"type":"string"}, + "PrefixLevel":{ + "type":"structure", + "required":["StorageMetrics"], + "members":{ + "StorageMetrics":{ + "shape":"PrefixLevelStorageMetrics", + "documentation":"

A container for the prefix-level storage metrics for S3 Storage Lens.

" + } + }, + "documentation":"

A container for the prefix-level configuration.

" + }, + "PrefixLevelStorageMetrics":{ + "type":"structure", + "members":{ + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

A container for whether prefix-level storage metrics are enabled.

" + }, + "SelectionCriteria":{"shape":"SelectionCriteria"} + }, + "documentation":"

A container for the prefix-level storage metrics for S3 Storage Lens.

" + }, "PublicAccessBlockConfiguration":{ "type":"structure", "members":{ @@ -2161,11 +2555,11 @@ }, "RestrictPublicBuckets":{ "shape":"Setting", - "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to TRUE restricts access to buckets with public policies to only AWS services and authorized users within this account.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

This is not supported for Amazon S3 on Outposts.

", + "documentation":"

Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to TRUE restricts access to buckets with public policies to only AWS service principals and authorized users within this account.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

This is not supported for Amazon S3 on Outposts.

", "locationName":"RestrictPublicBuckets" } }, - "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

This is not supported for Amazon S3 on Outposts.

" + "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 account. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

This is not supported for Amazon S3 on Outposts.

" }, "PublicAccessBlockEnabled":{"type":"boolean"}, "PutAccessPointPolicyRequest":{ @@ -2185,13 +2579,13 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

The name of the access point that you want to associate with the specified policy.

For Amazon S3 on Outposts specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", + "documentation":"

The name of the access point that you want to associate with the specified policy.

When using this parameter with Amazon S3 on Outposts through the REST API, you must also specify the name and the x-amz-outpost-id.

When using this parameter with S3 on Outposts through the AWS SDK and CLI, you must specify the ARN of the access point accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>. For example, to access the access point reports-ap through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. The value must be URL encoded.

", "location":"uri", "locationName":"name" }, "Policy":{ "shape":"Policy", - "documentation":"

The policy that you want to apply to the specified access point. For more information about access point policies, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
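A minimal sketch of applying a policy to an Outposts access point by passing its ARN as the name; the ARN is a placeholder and the policy document is elided:

import software.amazon.awssdk.services.s3control.S3ControlClient;

public class PutAccessPointPolicyExample {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.create();

        String accessPointArn =
                "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap";

        // Placeholder policy document; supply a valid access point policy here.
        String policy = "{\"Version\":\"2012-10-17\",\"Statement\":[]}";

        s3Control.putAccessPointPolicy(b -> b.name(accessPointArn).policy(policy));
    }
}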

" } } }, @@ -2241,7 +2635,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The ARN of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

Specifies the bucket.

When using this parameter with Amazon S3 on Outposts through the REST API, you must also specify the name and the x-amz-outpost-id.

When using this parameter with S3 on Outposts through the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" }, @@ -2274,7 +2668,7 @@ }, "Bucket":{ "shape":"BucketName", - "documentation":"

The Amazon Resource Name (ARN) of the bucket.

For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", + "documentation":"

The Amazon Resource Name (ARN) of the bucket.

When using this parameter with Amazon S3 on Outposts through the REST API, you must also specify the name and the x-amz-outpost-id.

When using this parameter with S3 on Outposts through the AWS SDK and CLI, you must specify the ARN of the bucket accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>. For example, to access the bucket reports through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. The value must be URL encoded.

", "location":"uri", "locationName":"name" }, @@ -2342,6 +2736,69 @@ }, "payload":"PublicAccessBlockConfiguration" }, + "PutStorageLensConfigurationRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId", + "StorageLensConfiguration" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

The ID of the S3 Storage Lens configuration.

", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the requester.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "StorageLensConfiguration":{ + "shape":"StorageLensConfiguration", + "documentation":"

The S3 Storage Lens configuration.

" + }, + "Tags":{ + "shape":"StorageLensTags", + "documentation":"

The tag set of the S3 Storage Lens configuration.

You can set up to a maximum of 50 tags.
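A sketch of creating a minimal enabled configuration; it assumes AccountLevel and BucketLevel accept empty defaults, and the IDs are placeholders:

import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.s3control.model.AccountLevel;
import software.amazon.awssdk.services.s3control.model.BucketLevel;
import software.amazon.awssdk.services.s3control.model.StorageLensConfiguration;

public class PutStorageLensConfigurationExample {
    public static void main(String[] args) {
        S3ControlClient s3Control = S3ControlClient.create();

        // Minimal configuration: account-level metrics with default bucket-level settings.
        StorageLensConfiguration config = StorageLensConfiguration.builder()
                .id("my-dashboard")
                .isEnabled(true)
                .accountLevel(AccountLevel.builder()
                        .bucketLevel(BucketLevel.builder().build())
                        .build())
                .build();

        s3Control.putStorageLensConfiguration(b -> b
                .configId("my-dashboard")
                .accountId("123456789012")
                .storageLensConfiguration(config));
    }
}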

" + } + } + }, + "PutStorageLensConfigurationTaggingRequest":{ + "type":"structure", + "required":[ + "ConfigId", + "AccountId", + "Tags" + ], + "members":{ + "ConfigId":{ + "shape":"ConfigId", + "documentation":"

The ID of the S3 Storage Lens configuration.

", + "location":"uri", + "locationName":"storagelensid" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the requester.

", + "hostLabel":true, + "location":"header", + "locationName":"x-amz-account-id" + }, + "Tags":{ + "shape":"StorageLensTags", + "documentation":"

The tag set of the S3 Storage Lens configuration.

You can set up to a maximum of 50 tags.

" + } + } + }, + "PutStorageLensConfigurationTaggingResult":{ + "type":"structure", + "members":{ + } + }, "RegionalBucket":{ "type":"structure", "required":[ @@ -2380,6 +2837,13 @@ "locationName":"RegionalBucket" } }, + "Regions":{ + "type":"list", + "member":{ + "shape":"S3AWSRegion", + "locationName":"Region" + } + }, "ReportPrefixString":{ "type":"string", "max":512, @@ -2392,6 +2856,12 @@ "Ready" ] }, + "S3AWSRegion":{ + "type":"string", + "max":30, + "min":5, + "pattern":"[a-z0-9\\-]+" + }, "S3AccessControlList":{ "type":"structure", "required":["Owner"], @@ -2434,6 +2904,42 @@ "min":1, "pattern":"arn:[^:]+:s3:.*" }, + "S3BucketDestination":{ + "type":"structure", + "required":[ + "Format", + "OutputSchemaVersion", + "AccountId", + "Arn" + ], + "members":{ + "Format":{ + "shape":"Format", + "documentation":"

" + }, + "OutputSchemaVersion":{ + "shape":"OutputSchemaVersion", + "documentation":"

The schema version of the export file.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the owner of the S3 Storage Lens metrics export bucket.

" + }, + "Arn":{ + "shape":"S3BucketArnString", + "documentation":"

The Amazon Resource Name (ARN) of the bucket. This property is read-only and is in the following format: arn:aws:s3:us-east-1:example-account-id:bucket/your-destination-bucket-name

" + }, + "Prefix":{ + "shape":"Prefix", + "documentation":"

The prefix of the destination bucket where the metrics export will be delivered.

" + }, + "Encryption":{ + "shape":"StorageLensDataExportEncryption", + "documentation":"

The container for the type of encryption of the metrics exports in this bucket.

" + } + }, + "documentation":"

A container for the bucket where the Amazon S3 Storage Lens metrics export files are located.

" + }, "S3CannedAccessControlList":{ "type":"string", "enum":[ @@ -2455,7 +2961,7 @@ "members":{ "TargetResource":{ "shape":"S3BucketArnString", - "documentation":"

" + "documentation":"

Specifies the destination bucket ARN for the batch copy operation. For example, to copy objects to a bucket named \"destinationBucket\", set the TargetResource to \"arn:aws:s3:::destinationBucket\".

" }, "CannedAccessControlList":{ "shape":"S3CannedAccessControlList", @@ -2485,7 +2991,7 @@ }, "RedirectLocation":{ "shape":"NonEmptyMaxLength2048String", - "documentation":"

" + "documentation":"

Specifies an optional metadata property for website redirects, x-amz-website-redirect-location. Allows webpage redirects if the object is accessed through a website endpoint.

" }, "RequesterPays":{ "shape":"Boolean", @@ -2505,7 +3011,7 @@ }, "TargetKeyPrefix":{ "shape":"NonEmptyMaxLength1024String", - "documentation":"

" + "documentation":"

Specifies the folder prefix into which you would like the objects to be copied. For example, to copy objects into a folder named \"Folder1\" in the destination bucket, set the TargetKeyPrefix to \"Folder1/\".

" }, "ObjectLockLegalHoldStatus":{ "shape":"S3ObjectLockLegalHoldStatus", @@ -2829,7 +3335,174 @@ "value":{"shape":"MaxLength1024String"}, "max":8192 }, + "SSEKMS":{ + "type":"structure", + "required":["KeyId"], + "members":{ + "KeyId":{ + "shape":"SSEKMSKeyId", + "documentation":"

A container for the ARN of the SSE-KMS encryption key. This property is read-only and is in the following format: arn:aws:kms:us-east-1:example-account-id:key/example-9a73-4afc-8d29-8f5900cef44e

" + } + }, + "documentation":"

", + "locationName":"SSE-KMS" + }, + "SSEKMSKeyId":{"type":"string"}, + "SSES3":{ + "type":"structure", + "members":{ + }, + "documentation":"

", + "locationName":"SSE-S3" + }, + "SelectionCriteria":{ + "type":"structure", + "members":{ + "Delimiter":{ + "shape":"StorageLensPrefixLevelDelimiter", + "documentation":"

A container for the delimiter of the selection criteria being used.

" + }, + "MaxDepth":{ + "shape":"StorageLensPrefixLevelMaxDepth", + "documentation":"

The maximum depth of the selection criteria.

" + }, + "MinStorageBytesPercentage":{ + "shape":"MinStorageBytesPercentage", + "documentation":"

The minimum storage bytes percentage for which metrics will be selected.

You must choose a value greater than or equal to 1.0.

" + } + }, + "documentation":"

" + }, "Setting":{"type":"boolean"}, + "StorageLensArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:[a-z\\-]+:s3:[a-z0-9\\-]+:\\d{12}:storage\\-lens\\/.*" + }, + "StorageLensAwsOrg":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"AwsOrgArn", + "documentation":"

A container for the Amazon Resource Name (ARN) of the AWS organization. This property is read-only and is in the following format: arn:aws:organizations:us-east-1:example-account-id:organization/o-ex2l495dck

" + } + }, + "documentation":"

The AWS organization for your S3 Storage Lens.

" + }, + "StorageLensConfiguration":{ + "type":"structure", + "required":[ + "Id", + "AccountLevel", + "IsEnabled" + ], + "members":{ + "Id":{ + "shape":"ConfigId", + "documentation":"

A container for the Amazon S3 Storage Lens configuration ID.

" + }, + "AccountLevel":{ + "shape":"AccountLevel", + "documentation":"

A container for all the account-level configurations of your S3 Storage Lens configuration.

" + }, + "Include":{ + "shape":"Include", + "documentation":"

A container for what is included in this configuration. This container is valid only if there is no Exclude container submitted, and it must not be empty.

" + }, + "Exclude":{ + "shape":"Exclude", + "documentation":"

A container for what is excluded in this configuration. This container is valid only if there is no Include container submitted, and it must not be empty.

" + }, + "DataExport":{ + "shape":"StorageLensDataExport", + "documentation":"

A container to specify the properties of your S3 Storage Lens metrics export, including the destination, schema, and format.

" + }, + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

A container for whether the S3 Storage Lens configuration is enabled.

" + }, + "AwsOrg":{ + "shape":"StorageLensAwsOrg", + "documentation":"

A container for the AWS organization for this S3 Storage Lens configuration.

" + }, + "StorageLensArn":{ + "shape":"StorageLensArn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 Storage Lens configuration. This property is read-only and is in the following format: arn:aws:s3:us-east-1:example-account-id:storage-lens/your-dashboard-name

" + } + }, + "documentation":"

A container for the Amazon S3 Storage Lens configuration.

" + }, + "StorageLensConfigurationList":{ + "type":"list", + "member":{ + "shape":"ListStorageLensConfigurationEntry", + "locationName":"StorageLensConfiguration" + }, + "flattened":true + }, + "StorageLensDataExport":{ + "type":"structure", + "required":["S3BucketDestination"], + "members":{ + "S3BucketDestination":{ + "shape":"S3BucketDestination", + "documentation":"

A container for the bucket where the S3 Storage Lens metrics export will be located.

" + } + }, + "documentation":"

A container to specify the properties of your S3 Storage Lens metrics export, including the destination, schema, and format.

" + }, + "StorageLensDataExportEncryption":{ + "type":"structure", + "members":{ + "SSES3":{ + "shape":"SSES3", + "documentation":"

", + "locationName":"SSE-S3" + }, + "SSEKMS":{ + "shape":"SSEKMS", + "documentation":"

", + "locationName":"SSE-KMS" + } + }, + "documentation":"

A container for the encryption of the S3 Storage Lens metrics exports.

" + }, + "StorageLensPrefixLevelDelimiter":{ + "type":"string", + "max":1 + }, + "StorageLensPrefixLevelMaxDepth":{ + "type":"integer", + "max":10, + "min":1 + }, + "StorageLensTag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKeyString", + "documentation":"

" + }, + "Value":{ + "shape":"TagValueString", + "documentation":"

" + } + }, + "documentation":"

" + }, + "StorageLensTags":{ + "type":"list", + "member":{ + "shape":"StorageLensTag", + "locationName":"Tag" + } + }, "StringForNextToken":{ "type":"string", "max":1024, @@ -2897,7 +3570,7 @@ "documentation":"

The storage class to which you want the object to transition.

" } }, - "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" }, "TransitionList":{ "type":"list", diff --git a/services/s3control/src/main/resources/software/amazon/awssdk/services/s3control/execution.interceptors b/services/s3control/src/main/resources/software/amazon/awssdk/services/s3control/execution.interceptors index b19e9f79a356..bc59d1fe2d3e 100644 --- a/services/s3control/src/main/resources/software/amazon/awssdk/services/s3control/execution.interceptors +++ b/services/s3control/src/main/resources/software/amazon/awssdk/services/s3control/execution.interceptors @@ -1,2 +1,3 @@ +software.amazon.awssdk.services.s3control.internal.interceptors.DisableDoubleUrlEncodingForSigningInterceptor software.amazon.awssdk.services.s3control.internal.interceptors.EndpointAddressInterceptor software.amazon.awssdk.services.s3control.internal.interceptors.PayloadSigningInterceptor \ No newline at end of file diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequetsTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequestTest.java similarity index 98% rename from services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequetsTest.java rename to services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequestTest.java index 4f3e7cd17a3c..12534ca47bbe 100644 --- a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequetsTest.java +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/NonArnOutpostRequestTest.java @@ -34,7 +34,7 @@ import software.amazon.awssdk.services.s3control.S3ControlClient; -public class NonArnOutpostRequetsTest extends S3ControlWireMockTestBase { +public class NonArnOutpostRequestTest extends S3ControlWireMockTestBase { private S3ControlClient s3; private static final String EXPECTED_URL = "/v20180820/bucket"; diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3AccessPointArnTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3AccessPointArnTest.java index e84c9e5f98af..b22ede8fec84 100644 --- a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3AccessPointArnTest.java +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/arns/S3AccessPointArnTest.java @@ -21,10 +21,9 @@ import org.junit.rules.ExpectedException; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3control.S3ControlClient; -import software.amazon.awssdk.services.s3control.S3ControlClientBuilder; public class S3AccessPointArnTest extends S3ControlWireMockTestBase { - private S3ControlClient s3; + private S3ControlClient s3Control; private static final String EXPECTED_URL = "/v20180820/accesspoint/myendpoint"; @Rule @@ -32,63 +31,57 @@ public class S3AccessPointArnTest extends S3ControlWireMockTestBase { @Before public void methodSetUp() { - s3 = buildClient(); + s3Control = buildClient(); } @Test public void malformedArn_MissingOutpostSegment_shouldThrowException() { - S3ControlClient s3ControlForTest = buildClientCustom().build(); - String accessPointArn = 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost"; exception.expect(IllegalArgumentException.class); exception.expectMessage("Unknown ARN type"); - s3ControlForTest.getAccessPoint(b -> b.name(accessPointArn)); + s3Control.getAccessPoint(b -> b.name(accessPointArn)); } @Test public void malformedArn_MissingAccessPointSegment_shouldThrowException() { - S3ControlClient s3ControlForTest = buildClientCustom().build(); - String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456"; exception.expect(IllegalArgumentException.class); exception.expectMessage("Invalid format"); - s3ControlForTest.getAccessPoint(b -> b.name(accessPointArn)); + s3Control.getAccessPoint(b -> b.name(accessPointArn)); } @Test public void malformedArn_MissingAccessPointName_shouldThrowException() { - S3ControlClient s3ControlForTest = buildClientCustom().build(); - String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:myaccesspoint"; exception.expect(IllegalArgumentException.class); exception.expectMessage("Invalid format"); - s3ControlForTest.getAccessPoint(b -> b.name(accessPointArn)); + s3Control.getAccessPoint(b -> b.name(accessPointArn)); } @Test public void accessPointArn_ClientHasCustomEndpoint_throwsIllegalArgumentException() { - S3ControlClient s3Control = buildClientWithCustomEndpoint("https://foo.bar", "us-east-1"); + S3ControlClient s3ControlCustom = buildClientWithCustomEndpoint("https://foo.bar", "us-east-1"); String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint" + ":myaccesspoint"; exception.expect(IllegalArgumentException.class); exception.expectMessage("endpoint"); - s3Control.getAccessPoint(b -> b.name(accessPointArn)); + s3ControlCustom.getAccessPoint(b -> b.name(accessPointArn)); } @Test public void bucketArnDifferentRegionNoConfigFlag_throwsIllegalArgumentException() { - S3ControlClient s3ControlForTest = initializedBuilder().region(Region.of("us-east-1")).build(); + S3ControlClient s3ControlCustom = initializedBuilder().region(Region.of("us-east-1")).build(); String accessPointArn = "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint" + ":myaccesspoint"; exception.expect(IllegalArgumentException.class); exception.expectMessage("does not match the region the client was configured with"); - s3ControlForTest.getAccessPoint(b -> b.name(accessPointArn)); + s3ControlCustom.getAccessPoint(b -> b.name(accessPointArn)); } @Override diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/signing/UrlEncodingTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/signing/UrlEncodingTest.java new file mode 100644 index 000000000000..0e65c83f2a3d --- /dev/null +++ b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/functionaltests/signing/UrlEncodingTest.java @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3control.internal.functionaltests.signing; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.URI; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3control.S3ControlClient; + + +public class UrlEncodingTest { + private static final URI HTTP_LOCALHOST_URI = URI.create("http://localhost:8080/"); + private static final String EXPECTED_URL = "/v20180820/jobs/id"; + + private S3ControlClient s3Control; + private ExecutionAttributeInterceptor interceptor; + + @Rule + public WireMockRule mockServer = new WireMockRule(); + + protected S3ControlClient buildClient() { + this.interceptor = new ExecutionAttributeInterceptor(HTTP_LOCALHOST_URI); + + return S3ControlClient.builder() + .credentialsProvider(() -> AwsBasicCredentials.create("test", "test")) + .region(Region.US_WEST_2) + .overrideConfiguration(o -> o.addExecutionInterceptor(this.interceptor)) + .build(); + } + + @Before + public void methodSetUp() { + s3Control = buildClient(); + } + + @Test + public void any_request_should_set_double_url_encode_to_false() { + stubFor(get(urlMatching(EXPECTED_URL)).willReturn(aResponse().withBody("").withStatus(200))); + + s3Control.describeJob(b -> b.accountId("123456789012").jobId("id")); + + assertThat(interceptor.signerDoubleUrlEncode()).isNotNull(); + assertThat(interceptor.signerDoubleUrlEncode()).isFalse(); + } + + /** + * In addition to checking the signing attribute, the interceptor sets the endpoint since + * S3 control prepends the account id to the host name and wiremock won't intercept the request + */ + private static class ExecutionAttributeInterceptor implements ExecutionInterceptor { + private final URI rerouteEndpoint; + private Boolean signerDoubleUrlEncode; + + ExecutionAttributeInterceptor(URI rerouteEndpoint) { + this.rerouteEndpoint = rerouteEndpoint; + } + + @Override + public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + signerDoubleUrlEncode = executionAttributes.getAttribute(AwsSignerExecutionAttribute.SIGNER_DOUBLE_URL_ENCODE); + } + + @Override + public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { + SdkHttpRequest request = context.httpRequest(); + return request.toBuilder().uri(rerouteEndpoint).build(); + } + + public Boolean signerDoubleUrlEncode() { + return signerDoubleUrlEncode; + } + } +} diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index 145582c502c8..70824ac0c21e 100644 --- a/services/s3outposts/pom.xml +++ 
b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index 6a40d5419410..5c0d25e1355b 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/paginators-1.json b/services/sagemaker/src/main/resources/codegen-resources/paginators-1.json index 7f87ca77493b..482033820fe4 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/paginators-1.json +++ b/services/sagemaker/src/main/resources/codegen-resources/paginators-1.json @@ -1,17 +1,41 @@ { "pagination": { + "ListActions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ActionSummaries" + }, "ListAlgorithms": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "AlgorithmSummaryList" }, + "ListAppImageConfigs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AppImageConfigs" + }, "ListApps": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Apps" }, + "ListArtifacts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ArtifactSummaries" + }, + "ListAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AssociationSummaries" + }, "ListAutoMLJobs": { "input_token": "NextToken", "output_token": "NextToken", @@ -36,6 +60,12 @@ "limit_key": "MaxResults", "result_key": "CompilationJobSummaries" }, + "ListContexts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ContextSummaries" + }, "ListDomains": { "input_token": "NextToken", "output_token": "NextToken", @@ -60,6 +90,12 @@ "limit_key": "MaxResults", "result_key": "ExperimentSummaries" }, + "ListFeatureGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FeatureGroupSummaries" + }, "ListFlowDefinitions": { "input_token": "NextToken", "output_token": "NextToken", @@ -78,6 +114,18 @@ "limit_key": "MaxResults", "result_key": "HyperParameterTuningJobSummaries" }, + "ListImageVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ImageVersions" + }, + "ListImages": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Images" + }, "ListLabelingJobs": { "input_token": "NextToken", "output_token": "NextToken", @@ -90,6 +138,12 @@ "limit_key": "MaxResults", "result_key": "LabelingJobSummaryList" }, + "ListModelPackageGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ModelPackageGroupSummaryList" + }, "ListModelPackages": { "input_token": "NextToken", "output_token": "NextToken", @@ -126,12 +180,41 @@ "limit_key": "MaxResults", "result_key": "NotebookInstances" }, + "ListPipelineExecutionSteps": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "PipelineExecutionSteps" + }, + "ListPipelineExecutions": { + "input_token": 
"NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "PipelineExecutionSummaries" + }, + "ListPipelineParametersForExecution": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "PipelineParameters" + }, + "ListPipelines": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "PipelineSummaries" + }, "ListProcessingJobs": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "ProcessingJobSummaries" }, + "ListProjects": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListSubscribedWorkteams": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 8f4f2da939e9..18ecc3499392 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -14,6 +14,20 @@ "uid":"sagemaker-2017-07-24" }, "operations":{ + "AddAssociation":{ + "name":"AddAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddAssociationRequest"}, + "output":{"shape":"AddAssociationResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates an association between the source and the destination. A source can be associated with multiple destinations, and a destination can be associated with multiple sources. An association is a lineage tracking entity. For more information, see Amazon SageMaker ML Lineage Tracking.
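A sketch of creating one such association with the AWS SDK for Java v2; the entity ARNs and the association type value are placeholders, and the request is assumed to take source and destination ARNs:

import software.amazon.awssdk.services.sagemaker.SageMakerClient;

public class AddAssociationExample {
    public static void main(String[] args) {
        SageMakerClient sageMaker = SageMakerClient.create();

        // Placeholder lineage entity ARNs (for example, an artifact and an action).
        String sourceArn = "arn:aws:sagemaker:us-west-2:123456789012:artifact/example-artifact-id";
        String destinationArn = "arn:aws:sagemaker:us-west-2:123456789012:action/example-action";

        sageMaker.addAssociation(b -> b
                .sourceArn(sourceArn)
                .destinationArn(destinationArn)
                .associationType("ContributedTo")); // assumed association type value
    }
}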

" + }, "AddTags":{ "name":"AddTags", "http":{ @@ -38,6 +52,19 @@ ], "documentation":"

Associates a trial component with a trial. A trial component can be associated with multiple trials. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.

" }, + "CreateAction":{ + "name":"CreateAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateActionRequest"}, + "output":{"shape":"CreateActionResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates an action. An action is a lineage tracking entity that represents an action or activity. For example, a model deployment or an HPO job. Generally, an action involves at least one input or output artifact. For more information, see Amazon SageMaker ML Lineage Tracking.

" + }, "CreateAlgorithm":{ "name":"CreateAlgorithm", "http":{ @@ -62,6 +89,32 @@ ], "documentation":"

Creates a running App for the specified UserProfile. Supported Apps are JupyterServer and KernelGateway. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

" }, + "CreateAppImageConfig":{ + "name":"CreateAppImageConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAppImageConfigRequest"}, + "output":{"shape":"CreateAppImageConfigResponse"}, + "errors":[ + {"shape":"ResourceInUse"} + ], + "documentation":"

Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, and a list of the kernels in the image.

" + }, + "CreateArtifact":{ + "name":"CreateArtifact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateArtifactRequest"}, + "output":{"shape":"CreateArtifactResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates an artifact. An artifact is a lineage tracking entity that represents a URI addressable object or data. Some examples are the S3 URI of a dataset and the ECR registry path of an image. For more information, see Amazon SageMaker ML Lineage Tracking.

" + }, "CreateAutoMLJob":{ "name":"CreateAutoMLJob", "http":{ @@ -100,6 +153,19 @@ ], "documentation":"

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job.

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

" }, + "CreateContext":{ + "name":"CreateContext", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateContextRequest"}, + "output":{"shape":"CreateContextResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a context. A context is a lineage tracking entity that represents a logical grouping of other tracking or experiment entities. Some examples are an endpoint and a model package. For more information, see Amazon SageMaker ML Lineage Tracking.

" + }, "CreateDomain":{ "name":"CreateDomain", "http":{ @@ -112,7 +178,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to Studio. The following options are available:

  • PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value.

  • VpcOnly - All Studio traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.

    When internet access is disabled, you won't be able to train or host models unless your VPC has an interface endpoint (PrivateLink) or a NAT gateway and your security groups allow outbound connections.

VpcOnly network access type

When you choose VpcOnly, you must specify the following:

  • Security group inbound and outbound rules to allow NFS traffic over TCP on port 2049 between the domain and the EFS volume

  • Security group inbound and outbound rules to allow traffic between the JupyterServer app and the KernelGateway apps

  • Interface endpoints to access the SageMaker API and SageMaker runtime

For more information, see:

" + "documentation":"

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the AWS Key Management Service (AWS KMS) to encrypt the EFS volume attached to the domain with an AWS managed customer master key (CMK) by default. For more control, you can specify a customer managed CMK. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to Studio. The following options are available:

  • PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value.

  • VpcOnly - All Studio traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.

    When internet access is disabled, you won't be able to run a Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections.

For more information, see Connect SageMaker Studio Notebooks to Resources in a VPC.
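A sketch of creating a domain with the default PublicInternetOnly network access type; the role ARN, VPC ID, and subnet ID are placeholders:

import software.amazon.awssdk.services.sagemaker.SageMakerClient;
import software.amazon.awssdk.services.sagemaker.model.AppNetworkAccessType;
import software.amazon.awssdk.services.sagemaker.model.AuthMode;
import software.amazon.awssdk.services.sagemaker.model.UserSettings;

public class CreateDomainExample {
    public static void main(String[] args) {
        SageMakerClient sageMaker = SageMakerClient.create();

        String domainArn = sageMaker.createDomain(b -> b
                .domainName("my-studio-domain")
                .authMode(AuthMode.IAM)
                .defaultUserSettings(UserSettings.builder()
                        .executionRole("arn:aws:iam::123456789012:role/StudioExecutionRole")
                        .build())
                .appNetworkAccessType(AppNetworkAccessType.PUBLIC_INTERNET_ONLY)
                .vpcId("vpc-0123456789abcdef0")
                .subnetIds("subnet-0123456789abcdef0"))
                .domainArn();

        System.out.println(domainArn);
    }
}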

" }, "CreateEndpoint":{ "name":"CreateEndpoint", @@ -125,7 +191,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" + "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. Retry logic is therefore recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.
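A minimal sketch of creating the endpoint and checking its status; the endpoint configuration named here is assumed to already exist:

import software.amazon.awssdk.services.sagemaker.SageMakerClient;
import software.amazon.awssdk.services.sagemaker.model.EndpointStatus;

public class CreateEndpointExample {
    public static void main(String[] args) {
        SageMakerClient sageMaker = SageMakerClient.create();

        // The endpoint configuration must already exist (CreateEndpointConfig).
        sageMaker.createEndpoint(b -> b
                .endpointName("my-endpoint")
                .endpointConfigName("my-endpoint-config"));

        // Status moves from Creating to InService once deployment finishes.
        EndpointStatus status = sageMaker
                .describeEndpoint(b -> b.endpointName("my-endpoint"))
                .endpointStatus();
        System.out.println(status);
    }
}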

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

To add the IAM role policies for using this API operation, go to the IAM console and choose Roles in the left navigation pane. Search for the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, and add the following policies to the role.

  • Option 1: For full Amazon SageMaker access, search for and attach the AmazonSageMakerFullAccess policy.

  • Option 2: To grant limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:

    \"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]

    \"Resource\": [

    \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"

    \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"

    ]

    For more information, see Amazon SageMaker API Permissions: Actions, Permissions, and Resources Reference.

" }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -153,6 +219,20 @@ ], "documentation":"

Creates an SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

" }, + "CreateFeatureGroup":{ + "name":"CreateFeatureGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFeatureGroupRequest"}, + "output":{"shape":"CreateFeatureGroupResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a Record.

The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its OnlineStore and OfflineStore. Check AWS service quotas to see the FeatureGroups quota for your AWS account.

You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup.
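    For illustration only, a minimal AWS SDK for Java v2 sketch of creating an OnlineStore-only FeatureGroup could look like the following; the feature group name, feature names, and role ARN are placeholders:

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.CreateFeatureGroupRequest;
    import software.amazon.awssdk.services.sagemaker.model.FeatureDefinition;
    import software.amazon.awssdk.services.sagemaker.model.FeatureType;
    import software.amazon.awssdk.services.sagemaker.model.OnlineStoreConfig;

    public class CreateFeatureGroupExample {
        public static void main(String[] args) {
            SageMakerClient sageMaker = SageMakerClient.create();

            // Placeholder names and role ARN; an OnlineStore-only FeatureGroup.
            sageMaker.createFeatureGroup(CreateFeatureGroupRequest.builder()
                    .featureGroupName("customers")
                    .recordIdentifierFeatureName("customer_id")
                    .eventTimeFeatureName("event_time")
                    .featureDefinitions(
                            FeatureDefinition.builder().featureName("customer_id").featureType(FeatureType.STRING).build(),
                            FeatureDefinition.builder().featureName("event_time").featureType(FeatureType.STRING).build(),
                            FeatureDefinition.builder().featureName("lifetime_value").featureType(FeatureType.FRACTIONAL).build())
                    .onlineStoreConfig(OnlineStoreConfig.builder().enableOnlineStore(true).build())
                    .roleArn("arn:aws:iam::111122223333:role/my-feature-store-role")
                    .build());
        }
    }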

" + }, "CreateFlowDefinition":{ "name":"CreateFlowDefinition", "http":{ @@ -195,6 +275,35 @@ ], "documentation":"

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.

" }, + "CreateImage":{ + "name":"CreateImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImageRequest"}, + "output":{"shape":"CreateImageResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon Elastic Container Registry (ECR). For more information, see Bring your own SageMaker image.

" + }, + "CreateImageVersion":{ + "name":"CreateImageVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImageVersionRequest"}, + "output":{"shape":"CreateImageVersionResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon Elastic Container Registry (ECR) container image specified by BaseImage.

" + }, "CreateLabelingJob":{ "name":"CreateLabelingJob", "http":{ @@ -230,7 +339,24 @@ }, "input":{"shape":"CreateModelPackageInput"}, "output":{"shape":"CreateModelPackageOutput"}, - "documentation":"

Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification. To create a model from an algorithm resource that you created or subscribed to in AWS Marketplace, provide a value for SourceAlgorithmSpecification.

" + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace, or a versioned model that is part of a model group. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

To create a model package by specifying a Docker container that contains your inference code and the Amazon S3 location of your model artifacts, provide values for InferenceSpecification. To create a model from an algorithm resource that you created or subscribed to in AWS Marketplace, provide a value for SourceAlgorithmSpecification.

There are two types of model packages:

  • Versioned - a model that is part of a model group in the model registry.

  • Unversioned - a model package that is not part of a model group.

" + }, + "CreateModelPackageGroup":{ + "name":"CreateModelPackageGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateModelPackageGroupInput"}, + "output":{"shape":"CreateModelPackageGroupOutput"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a model group. A model group contains a group of model versions.
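    As a hedged sketch of how a model group and a versioned model package in it might be created with the AWS SDK for Java v2 (the group name, ECR image URI, model artifact location, and instance types are placeholders):

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.CreateModelPackageGroupRequest;
    import software.amazon.awssdk.services.sagemaker.model.CreateModelPackageRequest;
    import software.amazon.awssdk.services.sagemaker.model.InferenceSpecification;
    import software.amazon.awssdk.services.sagemaker.model.ModelPackageContainerDefinition;
    import software.amazon.awssdk.services.sagemaker.model.ProductionVariantInstanceType;
    import software.amazon.awssdk.services.sagemaker.model.TransformInstanceType;

    public class ModelRegistryExample {
        public static void main(String[] args) {
            SageMakerClient sageMaker = SageMakerClient.create();

            // Create the model group first; versioned model packages are then added to it.
            sageMaker.createModelPackageGroup(CreateModelPackageGroupRequest.builder()
                    .modelPackageGroupName("churn-models")
                    .modelPackageGroupDescription("Model versions for the churn predictor")
                    .build());

            // Register a versioned model package in the group (placeholder image/artifact URIs).
            sageMaker.createModelPackage(CreateModelPackageRequest.builder()
                    .modelPackageGroupName("churn-models")
                    .inferenceSpecification(InferenceSpecification.builder()
                            .containers(ModelPackageContainerDefinition.builder()
                                    .image("111122223333.dkr.ecr.us-east-1.amazonaws.com/churn:latest")
                                    .modelDataUrl("s3://my-bucket/churn/model.tar.gz")
                                    .build())
                            .supportedContentTypes("text/csv")
                            .supportedResponseMIMETypes("text/csv")
                            .supportedRealtimeInferenceInstanceTypes(ProductionVariantInstanceType.ML_M5_LARGE)
                            .supportedTransformInstanceTypes(TransformInstanceType.ML_M5_LARGE)
                            .build())
                    .build());
        }
    }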

" }, "CreateMonitoringSchedule":{ "name":"CreateMonitoringSchedule", @@ -272,6 +398,20 @@ ], "documentation":"

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:/bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, + "CreatePipeline":{ + "name":"CreatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePipelineRequest"}, + "output":{"shape":"CreatePipelineResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a pipeline using a JSON pipeline definition.
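    A minimal AWS SDK for Java v2 sketch of creating and then starting such a pipeline might look like the following; the pipeline name, role ARN, and local definition file are placeholders, and the definition itself is assumed to already be valid pipeline JSON:

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.CreatePipelineRequest;
    import software.amazon.awssdk.services.sagemaker.model.StartPipelineExecutionRequest;

    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.UUID;

    public class CreatePipelineExample {
        public static void main(String[] args) throws Exception {
            SageMakerClient sageMaker = SageMakerClient.create();

            // Placeholder: the JSON pipeline definition is read from a local file.
            String definition = new String(
                    Files.readAllBytes(Paths.get("pipeline-definition.json")), StandardCharsets.UTF_8);

            sageMaker.createPipeline(CreatePipelineRequest.builder()
                    .pipelineName("training-pipeline")
                    .pipelineDefinition(definition)
                    .roleArn("arn:aws:iam::111122223333:role/my-pipeline-role")
                    .clientRequestToken(UUID.randomUUID().toString())
                    .build());

            // Kick off an execution of the newly created pipeline.
            sageMaker.startPipelineExecution(StartPipelineExecutionRequest.builder()
                    .pipelineName("training-pipeline")
                    .clientRequestToken(UUID.randomUUID().toString())
                    .build());
        }
    }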

" + }, "CreatePresignedDomainUrl":{ "name":"CreatePresignedDomainUrl", "http":{ @@ -310,6 +450,19 @@ ], "documentation":"

Creates a processing job.

" }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateProjectInput"}, + "output":{"shape":"CreateProjectOutput"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates a machine learning (ML) project that can contain one or more templates that set up an ML pipeline from training to deploying an approved model.

" + }, "CreateTrainingJob":{ "name":"CreateTrainingJob", "http":{ @@ -323,7 +476,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3, EFS, or FSx location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.

  • RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds to specify how long you are willing to wait for a managed spot training job to complete.

For more information about Amazon SageMaker, see How It Works.

" + "documentation":"

Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inference.

In the request body, you provide the following:

  • AlgorithmSpecification - Identifies the training algorithm to use.

  • HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

  • InputDataConfig - Describes the training dataset and the Amazon S3, EFS, or FSx location where it is stored.

  • OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of model training.

  • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

  • EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.

  • RoleArn - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

  • StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds to specify how long you are willing to wait for a managed spot training job to complete.

For more information about Amazon SageMaker, see How It Works.
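    Putting the request-body elements above together, a minimal AWS SDK for Java v2 sketch of CreateTrainingJob might look like this; the job name, ECR training image, S3 URIs, instance type, and role ARN are placeholders:

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.*;

    public class CreateTrainingJobExample {
        public static void main(String[] args) {
            SageMakerClient sageMaker = SageMakerClient.create();

            sageMaker.createTrainingJob(CreateTrainingJobRequest.builder()
                    .trainingJobName("xgboost-churn-2020-12-01")
                    .roleArn("arn:aws:iam::111122223333:role/my-training-role")
                    .algorithmSpecification(AlgorithmSpecification.builder()
                            .trainingImage("111122223333.dkr.ecr.us-east-1.amazonaws.com/xgboost:latest")
                            .trainingInputMode(TrainingInputMode.FILE)
                            .build())
                    .inputDataConfig(Channel.builder()
                            .channelName("train")
                            .dataSource(DataSource.builder()
                                    .s3DataSource(S3DataSource.builder()
                                            .s3DataType(S3DataType.S3_PREFIX)
                                            .s3Uri("s3://my-bucket/train/")
                                            .build())
                                    .build())
                            .build())
                    .outputDataConfig(OutputDataConfig.builder()
                            .s3OutputPath("s3://my-bucket/output/")
                            .build())
                    .resourceConfig(ResourceConfig.builder()
                            .instanceType(TrainingInstanceType.ML_M5_XLARGE)
                            .instanceCount(1)
                            .volumeSizeInGB(50)
                            .build())
                    .stoppingCondition(StoppingCondition.builder()
                            .maxRuntimeInSeconds(3600)
                            .build())
                    .build());
        }
    }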

" }, "CreateTransformJob":{ "name":"CreateTransformJob", @@ -405,6 +558,19 @@ ], "documentation":"

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

" }, + "DeleteAction":{ + "name":"DeleteAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteActionRequest"}, + "output":{"shape":"DeleteActionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an action.

" + }, "DeleteAlgorithm":{ "name":"DeleteAlgorithm", "http":{ @@ -427,6 +593,44 @@ ], "documentation":"

Used to stop and delete an app.

" }, + "DeleteAppImageConfig":{ + "name":"DeleteAppImageConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAppImageConfigRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an AppImageConfig.

" + }, + "DeleteArtifact":{ + "name":"DeleteArtifact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteArtifactRequest"}, + "output":{"shape":"DeleteArtifactResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an artifact. Either ArtifactArn or Source must be specified.

" + }, + "DeleteAssociation":{ + "name":"DeleteAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAssociationRequest"}, + "output":{"shape":"DeleteAssociationResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an association.

" + }, "DeleteCodeRepository":{ "name":"DeleteCodeRepository", "http":{ @@ -436,6 +640,19 @@ "input":{"shape":"DeleteCodeRepositoryInput"}, "documentation":"

Deletes the specified Git repository from your account.

" }, + "DeleteContext":{ + "name":"DeleteContext", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteContextRequest"}, + "output":{"shape":"DeleteContextResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes a context.

" + }, "DeleteDomain":{ "name":"DeleteDomain", "http":{ @@ -480,6 +697,18 @@ ], "documentation":"

Deletes an Amazon SageMaker experiment. All trials associated with the experiment must be deleted first. Use the ListTrials API to get a list of the trials associated with the experiment.

" }, + "DeleteFeatureGroup":{ + "name":"DeleteFeatureGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFeatureGroupRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be accessed from the OnlineStore immediately after DeleteFeatureGroup is called.

Data written into the OfflineStore will not be deleted. The AWS Glue database and tables that are automatically created for your OfflineStore are not deleted.

" + }, "DeleteFlowDefinition":{ "name":"DeleteFlowDefinition", "http":{ @@ -507,6 +736,34 @@ ], "documentation":"

Use this operation to delete a human task user interface (worker task template).

To see a list of human task user interfaces (worker task templates) in your account, use the ListHumanTaskUis API. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

" }, + "DeleteImage":{ + "name":"DeleteImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteImageRequest"}, + "output":{"shape":"DeleteImageResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes a SageMaker image and all versions of the image. The container images aren't deleted.

" + }, + "DeleteImageVersion":{ + "name":"DeleteImageVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteImageVersionRequest"}, + "output":{"shape":"DeleteImageVersionResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes a version of a SageMaker image. The container image the version represents isn't deleted.

" + }, "DeleteModel":{ "name":"DeleteModel", "http":{ @@ -523,8 +780,29 @@ "requestUri":"/" }, "input":{"shape":"DeleteModelPackageInput"}, + "errors":[ + {"shape":"ConflictException"} + ], "documentation":"

Deletes a model package.

A model package is used to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

" }, + "DeleteModelPackageGroup":{ + "name":"DeleteModelPackageGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelPackageGroupInput"}, + "documentation":"

Deletes the specified model group.

" + }, + "DeleteModelPackageGroupPolicy":{ + "name":"DeleteModelPackageGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelPackageGroupPolicyInput"}, + "documentation":"

Deletes a model group resource policy.

" + }, "DeleteMonitoringSchedule":{ "name":"DeleteMonitoringSchedule", "http":{ @@ -555,6 +833,28 @@ "input":{"shape":"DeleteNotebookInstanceLifecycleConfigInput"}, "documentation":"

Deletes a notebook instance lifecycle configuration.

" }, + "DeletePipeline":{ + "name":"DeletePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePipelineRequest"}, + "output":{"shape":"DeletePipelineResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes a pipeline.

" + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteProjectInput"}, + "documentation":"

Deletes the specified project.

" + }, "DeleteTags":{ "name":"DeleteTags", "http":{ @@ -627,6 +927,19 @@ ], "documentation":"

Deletes an existing work team. This operation can't be undone.

" }, + "DescribeAction":{ + "name":"DescribeAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeActionRequest"}, + "output":{"shape":"DescribeActionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes an action.

" + }, "DescribeAlgorithm":{ "name":"DescribeAlgorithm", "http":{ @@ -650,6 +963,32 @@ ], "documentation":"

Describes the app.

" }, + "DescribeAppImageConfig":{ + "name":"DescribeAppImageConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAppImageConfigRequest"}, + "output":{"shape":"DescribeAppImageConfigResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes an AppImageConfig.

" + }, + "DescribeArtifact":{ + "name":"DescribeArtifact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeArtifactRequest"}, + "output":{"shape":"DescribeArtifactResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes an artifact.

" + }, "DescribeAutoMLJob":{ "name":"DescribeAutoMLJob", "http":{ @@ -686,6 +1025,19 @@ ], "documentation":"

Returns information about a model compilation job.

To create a model compilation job, use CreateCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

" }, + "DescribeContext":{ + "name":"DescribeContext", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeContextRequest"}, + "output":{"shape":"DescribeContextResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes a context.

" + }, "DescribeDomain":{ "name":"DescribeDomain", "http":{ @@ -732,6 +1084,19 @@ ], "documentation":"

Provides a list of an experiment's properties.

" }, + "DescribeFeatureGroup":{ + "name":"DescribeFeatureGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFeatureGroupRequest"}, + "output":{"shape":"DescribeFeatureGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Use this operation to describe a FeatureGroup. The response includes information on the creation time, FeatureGroup name, the unique identifier for each FeatureGroup, and more.

" + }, "DescribeFlowDefinition":{ "name":"DescribeFlowDefinition", "http":{ @@ -771,6 +1136,32 @@ ], "documentation":"

Gets a description of a hyperparameter tuning job.

" }, + "DescribeImage":{ + "name":"DescribeImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageRequest"}, + "output":{"shape":"DescribeImageResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes a SageMaker image.

" + }, + "DescribeImageVersion":{ + "name":"DescribeImageVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImageVersionRequest"}, + "output":{"shape":"DescribeImageVersionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes a version of a SageMaker image.

" + }, "DescribeLabelingJob":{ "name":"DescribeLabelingJob", "http":{ @@ -804,6 +1195,16 @@ "output":{"shape":"DescribeModelPackageOutput"}, "documentation":"

Returns a description of the specified model package, which is used to create Amazon SageMaker models or list them on AWS Marketplace.

To create models in Amazon SageMaker, buyers can subscribe to model packages listed on AWS Marketplace.

" }, + "DescribeModelPackageGroup":{ + "name":"DescribeModelPackageGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeModelPackageGroupInput"}, + "output":{"shape":"DescribeModelPackageGroupOutput"}, + "documentation":"

Gets a description for the specified model group.

" + }, "DescribeMonitoringSchedule":{ "name":"DescribeMonitoringSchedule", "http":{ @@ -837,20 +1238,69 @@ "output":{"shape":"DescribeNotebookInstanceLifecycleConfigOutput"}, "documentation":"

Returns a description of a notebook instance lifecycle configuration.

For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, - "DescribeProcessingJob":{ - "name":"DescribeProcessingJob", + "DescribePipeline":{ + "name":"DescribePipeline", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"DescribeProcessingJobRequest"}, - "output":{"shape":"DescribeProcessingJobResponse"}, + "input":{"shape":"DescribePipelineRequest"}, + "output":{"shape":"DescribePipelineResponse"}, "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Returns a description of a processing job.

" + "documentation":"

Describes the details of a pipeline.

" }, - "DescribeSubscribedWorkteam":{ + "DescribePipelineDefinitionForExecution":{ + "name":"DescribePipelineDefinitionForExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePipelineDefinitionForExecutionRequest"}, + "output":{"shape":"DescribePipelineDefinitionForExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes the details of an execution's pipeline definition.

" + }, + "DescribePipelineExecution":{ + "name":"DescribePipelineExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePipelineExecutionRequest"}, + "output":{"shape":"DescribePipelineExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes the details of a pipeline execution.

" + }, + "DescribeProcessingJob":{ + "name":"DescribeProcessingJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeProcessingJobRequest"}, + "output":{"shape":"DescribeProcessingJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Returns a description of a processing job.

" + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeProjectInput"}, + "output":{"shape":"DescribeProjectOutput"}, + "documentation":"

Describes the details of a project.

" + }, + "DescribeSubscribedWorkteam":{ "name":"DescribeSubscribedWorkteam", "http":{ "method":"POST", @@ -945,6 +1395,16 @@ "output":{"shape":"DescribeWorkteamResponse"}, "documentation":"

Gets information about a specific work team. You can see information such as the create date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN).

" }, + "DisableSagemakerServicecatalogPortfolio":{ + "name":"DisableSagemakerServicecatalogPortfolio", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableSagemakerServicecatalogPortfolioInput"}, + "output":{"shape":"DisableSagemakerServicecatalogPortfolioOutput"}, + "documentation":"

Disables using Service Catalog in SageMaker. Service Catalog is used to create SageMaker projects.

" + }, "DisassociateTrialComponent":{ "name":"DisassociateTrialComponent", "http":{ @@ -958,6 +1418,36 @@ ], "documentation":"

Disassociates a trial component from a trial. This doesn't affect other trials the component is associated with. Before you can delete a component, you must disassociate the component from all trials it is associated with. To associate a trial component with a trial, call the AssociateTrialComponent API.

To get a list of the trials a component is associated with, use the Search API. Specify ExperimentTrialComponent for the Resource parameter. The list appears in the response under Results.TrialComponent.Parents.

" }, + "EnableSagemakerServicecatalogPortfolio":{ + "name":"EnableSagemakerServicecatalogPortfolio", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableSagemakerServicecatalogPortfolioInput"}, + "output":{"shape":"EnableSagemakerServicecatalogPortfolioOutput"}, + "documentation":"

Enables using Service Catalog in SageMaker. Service Catalog is used to create SageMaker projects.

" + }, + "GetModelPackageGroupPolicy":{ + "name":"GetModelPackageGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetModelPackageGroupPolicyInput"}, + "output":{"shape":"GetModelPackageGroupPolicyOutput"}, + "documentation":"

Gets a resource policy that manages access for a model group. For information about resource policies, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide.

" + }, + "GetSagemakerServicecatalogPortfolioStatus":{ + "name":"GetSagemakerServicecatalogPortfolioStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSagemakerServicecatalogPortfolioStatusInput"}, + "output":{"shape":"GetSagemakerServicecatalogPortfolioStatusOutput"}, + "documentation":"

Gets the status of Service Catalog in SageMaker. Service Catalog is used to create SageMaker projects.

" + }, "GetSearchSuggestions":{ "name":"GetSearchSuggestions", "http":{ @@ -968,6 +1458,19 @@ "output":{"shape":"GetSearchSuggestionsResponse"}, "documentation":"

An auto-complete API for the search functionality in the Amazon SageMaker console. It returns suggestions of possible matches for the property name to use in Search queries. Provides suggestions for HyperParameters, Tags, and Metrics.

" }, + "ListActions":{ + "name":"ListActions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListActionsRequest"}, + "output":{"shape":"ListActionsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Lists the actions in your account and their properties.

" + }, "ListAlgorithms":{ "name":"ListAlgorithms", "http":{ @@ -978,6 +1481,16 @@ "output":{"shape":"ListAlgorithmsOutput"}, "documentation":"

Lists the machine learning algorithms that have been created.

" }, + "ListAppImageConfigs":{ + "name":"ListAppImageConfigs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAppImageConfigsRequest"}, + "output":{"shape":"ListAppImageConfigsResponse"}, + "documentation":"

Lists the AppImageConfigs in your account and their properties. The list can be filtered by creation time or modified time, and whether the AppImageConfig name contains a specified string.

" + }, "ListApps":{ "name":"ListApps", "http":{ @@ -988,6 +1501,32 @@ "output":{"shape":"ListAppsResponse"}, "documentation":"

Lists apps.

" }, + "ListArtifacts":{ + "name":"ListArtifacts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListArtifactsRequest"}, + "output":{"shape":"ListArtifactsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Lists the artifacts in your account and their properties.

" + }, + "ListAssociations":{ + "name":"ListAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssociationsRequest"}, + "output":{"shape":"ListAssociationsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Lists the associations in your account and their properties.

" + }, "ListAutoMLJobs":{ "name":"ListAutoMLJobs", "http":{ @@ -1031,6 +1570,19 @@ "output":{"shape":"ListCompilationJobsResponse"}, "documentation":"

Lists model compilation jobs that satisfy various filters.

To create a model compilation job, use CreateCompilationJob. To get information about a particular model compilation job you have created, use DescribeCompilationJob.

" }, + "ListContexts":{ + "name":"ListContexts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListContextsRequest"}, + "output":{"shape":"ListContextsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Lists the contexts in your account and their properties.

" + }, "ListDomains":{ "name":"ListDomains", "http":{ @@ -1071,6 +1623,16 @@ "output":{"shape":"ListExperimentsResponse"}, "documentation":"

Lists all the experiments in your account. The list can be filtered to show only experiments that were created in a specific time range. The list can be sorted by experiment name or creation time.

" }, + "ListFeatureGroups":{ + "name":"ListFeatureGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFeatureGroupsRequest"}, + "output":{"shape":"ListFeatureGroupsResponse"}, + "documentation":"

Lists FeatureGroups based on the given filter and order.

" + }, "ListFlowDefinitions":{ "name":"ListFlowDefinitions", "http":{ @@ -1101,6 +1663,29 @@ "output":{"shape":"ListHyperParameterTuningJobsResponse"}, "documentation":"

Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account.

" }, + "ListImageVersions":{ + "name":"ListImageVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListImageVersionsRequest"}, + "output":{"shape":"ListImageVersionsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Lists the versions of a specified image and their properties. The list can be filtered by creation time or modified time.

" + }, + "ListImages":{ + "name":"ListImages", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListImagesRequest"}, + "output":{"shape":"ListImagesResponse"}, + "documentation":"

Lists the images in your account and their properties. The list can be filtered by creation time or modified time, and whether the image name contains a specified string.

" + }, "ListLabelingJobs":{ "name":"ListLabelingJobs", "http":{ @@ -1124,6 +1709,16 @@ ], "documentation":"

Gets a list of labeling jobs assigned to a specified work team.

" }, + "ListModelPackageGroups":{ + "name":"ListModelPackageGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListModelPackageGroupsInput"}, + "output":{"shape":"ListModelPackageGroupsOutput"}, + "documentation":"

Gets a list of the model groups in your AWS account.

" + }, "ListModelPackages":{ "name":"ListModelPackages", "http":{ @@ -1184,6 +1779,55 @@ "output":{"shape":"ListNotebookInstancesOutput"}, "documentation":"

Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region.

" }, + "ListPipelineExecutionSteps":{ + "name":"ListPipelineExecutionSteps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelineExecutionStepsRequest"}, + "output":{"shape":"ListPipelineExecutionStepsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Gets a list of PipelineExecutionStep objects.

" + }, + "ListPipelineExecutions":{ + "name":"ListPipelineExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelineExecutionsRequest"}, + "output":{"shape":"ListPipelineExecutionsResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Gets a list of the pipeline executions.

" + }, + "ListPipelineParametersForExecution":{ + "name":"ListPipelineParametersForExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelineParametersForExecutionRequest"}, + "output":{"shape":"ListPipelineParametersForExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Gets a list of parameters for a pipeline execution.

" + }, + "ListPipelines":{ + "name":"ListPipelines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPipelinesRequest"}, + "output":{"shape":"ListPipelinesResponse"}, + "documentation":"

Gets a list of pipelines.

" + }, "ListProcessingJobs":{ "name":"ListProcessingJobs", "http":{ @@ -1194,6 +1838,16 @@ "output":{"shape":"ListProcessingJobsResponse"}, "documentation":"

Lists processing jobs that satisfy various filters.

" }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListProjectsInput"}, + "output":{"shape":"ListProjectsOutput"}, + "documentation":"

Gets a list of the projects in an AWS account.

" + }, "ListSubscribedWorkteams":{ "name":"ListSubscribedWorkteams", "http":{ @@ -1303,6 +1957,16 @@ "output":{"shape":"ListWorkteamsResponse"}, "documentation":"

Gets a list of private work teams that you have defined in a region. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

" }, + "PutModelPackageGroupPolicy":{ + "name":"PutModelPackageGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutModelPackageGroupPolicyInput"}, + "output":{"shape":"PutModelPackageGroupPolicyOutput"}, + "documentation":"

Adds a resource policy to control access to a model group. For information about resource policies, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide.

" + }, "RenderUiTemplate":{ "name":"RenderUiTemplate", "http":{ @@ -1350,6 +2014,20 @@ ], "documentation":"

Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, Amazon SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook.

" }, + "StartPipelineExecution":{ + "name":"StartPipelineExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartPipelineExecutionRequest"}, + "output":{"shape":"StartPipelineExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Starts a pipeline execution.

" + }, "StopAutoMLJob":{ "name":"StopAutoMLJob", "http":{ @@ -1419,6 +2097,19 @@ "input":{"shape":"StopNotebookInstanceInput"}, "documentation":"

Terminates the ML compute instance. Before terminating the instance, Amazon SageMaker disconnects the ML storage volume from it. Amazon SageMaker preserves the ML storage volume. Amazon SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance.

To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

" }, + "StopPipelineExecution":{ + "name":"StopPipelineExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopPipelineExecutionRequest"}, + "output":{"shape":"StopPipelineExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Stops a pipeline execution.

" + }, "StopProcessingJob":{ "name":"StopProcessingJob", "http":{ @@ -1455,6 +2146,47 @@ ], "documentation":"

Stops a transform job.

When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.

" }, + "UpdateAction":{ + "name":"UpdateAction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateActionRequest"}, + "output":{"shape":"UpdateActionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates an action.

" + }, + "UpdateAppImageConfig":{ + "name":"UpdateAppImageConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAppImageConfigRequest"}, + "output":{"shape":"UpdateAppImageConfigResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates the properties of an AppImageConfig.

" + }, + "UpdateArtifact":{ + "name":"UpdateArtifact", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateArtifactRequest"}, + "output":{"shape":"UpdateArtifactResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates an artifact.

" + }, "UpdateCodeRepository":{ "name":"UpdateCodeRepository", "http":{ @@ -1465,6 +2197,20 @@ "output":{"shape":"UpdateCodeRepositoryOutput"}, "documentation":"

Updates the specified Git repository with the specified values.

" }, + "UpdateContext":{ + "name":"UpdateContext", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateContextRequest"}, + "output":{"shape":"UpdateContextResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates a context.

" + }, "UpdateDomain":{ "name":"UpdateDomain", "http":{ @@ -1520,6 +2266,30 @@ ], "documentation":"

Adds, updates, or removes the description of an experiment. Updates the display name of an experiment.

" }, + "UpdateImage":{ + "name":"UpdateImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateImageRequest"}, + "output":{"shape":"UpdateImageResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates the properties of a SageMaker image. To change the image's tags, use the AddTags and DeleteTags APIs.

" + }, + "UpdateModelPackage":{ + "name":"UpdateModelPackage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateModelPackageInput"}, + "output":{"shape":"UpdateModelPackageOutput"}, + "documentation":"

Updates a versioned model.

" + }, "UpdateMonitoringSchedule":{ "name":"UpdateMonitoringSchedule", "http":{ @@ -1560,6 +2330,32 @@ ], "documentation":"

Updates a notebook instance lifecycle configuration created with the CreateNotebookInstanceLifecycleConfig API.

" }, + "UpdatePipeline":{ + "name":"UpdatePipeline", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePipelineRequest"}, + "output":{"shape":"UpdatePipelineResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates a pipeline.

" + }, + "UpdatePipelineExecution":{ + "name":"UpdatePipelineExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePipelineExecutionRequest"}, + "output":{"shape":"UpdatePipelineExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates a pipeline execution.

" + }, "UpdateTrial":{ "name":"UpdateTrial", "http":{ @@ -1637,24 +2433,131 @@ "type":"string", "pattern":"^\\d+$" }, - "AddTagsInput":{ + "ActionArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:action/.*" + }, + "ActionSource":{ "type":"structure", - "required":[ - "ResourceArn", - "Tags" - ], + "required":["SourceUri"], "members":{ - "ResourceArn":{ - "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to tag.

" + "SourceUri":{ + "shape":"String2048", + "documentation":"

The URI of the source.

" }, - "Tags":{ - "shape":"TagList", - "documentation":"

An array of Tag objects. Each tag is a key-value pair. Only the key parameter is required. If you don't specify a value, Amazon SageMaker sets the value to an empty string.

" + "SourceType":{ + "shape":"String256", + "documentation":"

The type of the source.

" + }, + "SourceId":{ + "shape":"String256", + "documentation":"

The ID of the source.

" } - } + }, + "documentation":"

A structure describing the source of an action.

" }, - "AddTagsOutput":{ + "ActionStatus":{ + "type":"string", + "enum":[ + "Unknown", + "InProgress", + "Completed", + "Failed", + "Stopping", + "Stopped" + ] + }, + "ActionSummaries":{ + "type":"list", + "member":{"shape":"ActionSummary"} + }, + "ActionSummary":{ + "type":"structure", + "members":{ + "ActionArn":{ + "shape":"ActionArn", + "documentation":"

The Amazon Resource Name (ARN) of the action.

" + }, + "ActionName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the action.

" + }, + "Source":{ + "shape":"ActionSource", + "documentation":"

The source of the action.

" + }, + "ActionType":{ + "shape":"String64", + "documentation":"

The type of the action.

" + }, + "Status":{ + "shape":"ActionStatus", + "documentation":"

The status of the action.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the action was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the action was last modified.

" + } + }, + "documentation":"

Lists the properties of an action. An action represents an action or activity. Some examples are a workflow step and a model deployment. Generally, an action involves at least one input artifact or output artifact.

" + }, + "AddAssociationRequest":{ + "type":"structure", + "required":[ + "SourceArn", + "DestinationArn" + ], + "members":{ + "SourceArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The ARN of the source.

" + }, + "DestinationArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The Amazon Resource Name (ARN) of the destination.

" + }, + "AssociationType":{ + "shape":"AssociationEdgeType", + "documentation":"

The type of association. The following are suggested uses for each type. Amazon SageMaker places no restrictions on their use.

  • ContributedTo - The source contributed to the destination or had a part in enabling the destination. For example, the training data contributed to the training job.

  • AssociatedWith - The source is connected to the destination. For example, an approval workflow is associated with a model deployment.

  • DerivedFrom - The destination is a modification of the source. For example, a digest output of a channel input for a processing job is derived from the original inputs.

  • Produced - The source generated the destination. For example, a training job produced a model artifact.
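    Assuming the corresponding AddAssociation operation in the generated client, a minimal AWS SDK for Java v2 sketch of creating a ContributedTo association might look like this; both ARNs are placeholders:

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.AddAssociationRequest;
    import software.amazon.awssdk.services.sagemaker.model.AssociationEdgeType;

    public class AddAssociationExample {
        public static void main(String[] args) {
            SageMakerClient sageMaker = SageMakerClient.create();

            // Placeholder ARNs: link a dataset artifact to the training action that consumed it.
            sageMaker.addAssociation(AddAssociationRequest.builder()
                    .sourceArn("arn:aws:sagemaker:us-east-1:111122223333:artifact/abc123")
                    .destinationArn("arn:aws:sagemaker:us-east-1:111122223333:action/train-model")
                    .associationType(AssociationEdgeType.CONTRIBUTED_TO)
                    .build());
        }
    }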

" + } + } + }, + "AddAssociationResponse":{ + "type":"structure", + "members":{ + "SourceArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The ARN of the source.

" + }, + "DestinationArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The Amazon Resource Name (ARN) of the destination.

" + } + } + }, + "AddTagsInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to tag.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" + } + } + }, + "AddTagsOutput":{ "type":"structure", "members":{ "Tags":{ @@ -1668,6 +2571,28 @@ "member":{"shape":"CodeRepositoryNameOrUrl"}, "max":3 }, + "Alarm":{ + "type":"structure", + "members":{ + "AlarmName":{ + "shape":"AlarmName", + "documentation":"

" + } + }, + "documentation":"

This API is not supported.

" + }, + "AlarmList":{ + "type":"list", + "member":{"shape":"Alarm"}, + "max":10, + "min":1 + }, + "AlarmName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(?!\\s*$).+" + }, "AlgorithmArn":{ "type":"string", "max":2048, @@ -1889,7 +2814,55 @@ "documentation":"

The creation time.

" } }, - "documentation":"

The app's details.

" + "documentation":"

Details about an Amazon SageMaker app.

" + }, + "AppImageConfigArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:app-image-config/.*" + }, + "AppImageConfigDetails":{ + "type":"structure", + "members":{ + "AppImageConfigArn":{ + "shape":"AppImageConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the AppImageConfig.

" + }, + "AppImageConfigName":{ + "shape":"AppImageConfigName", + "documentation":"

The name of the AppImageConfig. Must be unique to your account.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the AppImageConfig was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the AppImageConfig was last modified.

" + }, + "KernelGatewayImageConfig":{ + "shape":"KernelGatewayImageConfig", + "documentation":"

The configuration for the file system and kernels in the SageMaker image.

" + } + }, + "documentation":"

The configuration for running a SageMaker image as a KernelGateway app.

" + }, + "AppImageConfigList":{ + "type":"list", + "member":{"shape":"AppImageConfigDetails"} + }, + "AppImageConfigName":{ + "type":"string", + "max":63, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + }, + "AppImageConfigSortKey":{ + "type":"string", + "enum":[ + "CreationTime", + "LastModifiedTime", + "Name" + ] }, "AppInstanceType":{ "type":"string", @@ -1932,10 +2905,11 @@ "type":"list", "member":{"shape":"AppDetails"} }, + "AppManaged":{"type":"boolean"}, "AppName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "AppNetworkAccessType":{ "type":"string", @@ -1985,12 +2959,106 @@ "TensorBoard" ] }, + "ApprovalDescription":{ + "type":"string", + "max":1024, + "pattern":".*" + }, "ArnOrName":{ "type":"string", "max":170, "min":1, "pattern":"(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z\\-]*\\/)?([a-zA-Z0-9]([a-zA-Z0-9-]){0,62})(?The URI of the source.

" + }, + "SourceTypes":{ + "shape":"ArtifactSourceTypes", + "documentation":"

A list of source types.

" + } + }, + "documentation":"

A structure describing the source of an artifact.

" + }, + "ArtifactSourceIdType":{ + "type":"string", + "enum":[ + "MD5Hash", + "S3ETag", + "S3Version", + "Custom" + ] + }, + "ArtifactSourceType":{ + "type":"structure", + "required":[ + "SourceIdType", + "Value" + ], + "members":{ + "SourceIdType":{ + "shape":"ArtifactSourceIdType", + "documentation":"

The type of ID.

" + }, + "Value":{ + "shape":"String256", + "documentation":"

The ID.

" + } + }, + "documentation":"

The ID and ID type of an artifact source.

" + }, + "ArtifactSourceTypes":{ + "type":"list", + "member":{"shape":"ArtifactSourceType"} + }, + "ArtifactSummaries":{ + "type":"list", + "member":{"shape":"ArtifactSummary"} + }, + "ArtifactSummary":{ + "type":"structure", + "members":{ + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact.

" + }, + "ArtifactName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the artifact.

" + }, + "Source":{ + "shape":"ArtifactSource", + "documentation":"

The source of the artifact.

" + }, + "ArtifactType":{ + "shape":"String256", + "documentation":"

The type of the artifact.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the artifact was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the artifact was last modified.

" + } + }, + "documentation":"

Lists a summary of the properties of an artifact. An artifact represents a URI addressable object or data. Some examples are a dataset and a model.

" + }, "AssemblyType":{ "type":"string", "enum":[ @@ -2028,6 +3096,138 @@ } } }, + "AssociationEdgeType":{ + "type":"string", + "enum":[ + "ContributedTo", + "AssociatedWith", + "DerivedFrom", + "Produced" + ] + }, + "AssociationEntityArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:(experiment|experiment-trial-component|artifact|action|context)/.*" + }, + "AssociationSummaries":{ + "type":"list", + "member":{"shape":"AssociationSummary"} + }, + "AssociationSummary":{ + "type":"structure", + "members":{ + "SourceArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The ARN of the source.

" + }, + "DestinationArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The Amazon Resource Name (ARN) of the destination.

" + }, + "SourceType":{ + "shape":"String256", + "documentation":"

The source type.

" + }, + "DestinationType":{ + "shape":"String256", + "documentation":"

The destination type.

" + }, + "AssociationType":{ + "shape":"AssociationEdgeType", + "documentation":"

The type of the association.

" + }, + "SourceName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the source.

" + }, + "DestinationName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the destination.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the association was created.

" + }, + "CreatedBy":{"shape":"UserContext"} + }, + "documentation":"

Lists a summary of the properties of an association. An association is an entity that links other lineage or experiment entities. An example would be an association between a training job and a model.

" + }, + "AthenaCatalog":{ + "type":"string", + "documentation":"

The name of the data catalog used in Athena query execution.

", + "max":256, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "AthenaDatabase":{ + "type":"string", + "documentation":"

The name of the database used in the Athena query execution.

", + "max":255, + "min":1, + "pattern":".*" + }, + "AthenaDatasetDefinition":{ + "type":"structure", + "required":[ + "Catalog", + "Database", + "QueryString", + "OutputS3Uri", + "OutputFormat" + ], + "members":{ + "Catalog":{"shape":"AthenaCatalog"}, + "Database":{"shape":"AthenaDatabase"}, + "QueryString":{"shape":"AthenaQueryString"}, + "WorkGroup":{"shape":"AthenaWorkGroup"}, + "OutputS3Uri":{ + "shape":"S3Uri", + "documentation":"

The location in Amazon S3 where Athena query results are stored.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data generated from an Athena query execution.

" + }, + "OutputFormat":{"shape":"AthenaResultFormat"}, + "OutputCompression":{"shape":"AthenaResultCompressionType"} + }, + "documentation":"

Configuration for Athena Dataset Definition input.
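    As an illustrative sketch (not part of the model itself), building this structure with the AWS SDK for Java v2 might look like the following; the catalog, database, query, and output location are placeholders:

    import software.amazon.awssdk.services.sagemaker.model.AthenaDatasetDefinition;
    import software.amazon.awssdk.services.sagemaker.model.AthenaResultFormat;

    public class AthenaDatasetDefinitionExample {
        public static void main(String[] args) {
            // Placeholder catalog, database, query, and output bucket.
            AthenaDatasetDefinition athena = AthenaDatasetDefinition.builder()
                    .catalog("AwsDataCatalog")
                    .database("sales")
                    .queryString("SELECT customer_id, amount FROM orders WHERE year = 2020")
                    .outputS3Uri("s3://my-bucket/athena-results/")
                    .outputFormat(AthenaResultFormat.PARQUET)
                    .build();
            System.out.println(athena);
        }
    }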

" + }, + "AthenaQueryString":{ + "type":"string", + "documentation":"

The SQL query statements to be executed.

", + "max":4096, + "min":1, + "pattern":"[\\s\\S]+" + }, + "AthenaResultCompressionType":{ + "type":"string", + "documentation":"

The compression used for Athena query results.

", + "enum":[ + "GZIP", + "SNAPPY", + "ZLIB" + ] + }, + "AthenaResultFormat":{ + "type":"string", + "documentation":"

The data storage format for Athena query results.

", + "enum":[ + "PARQUET", + "ORC", + "AVRO", + "JSON", + "TEXTFILE" + ] + }, + "AthenaWorkGroup":{ + "type":"string", + "documentation":"

The name of the workgroup in which the Athena query is being started.

", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9._-]+" + }, "AttributeName":{ "type":"string", "max":256, @@ -2250,7 +3450,7 @@ "type":"string", "max":32, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" }, "AutoMLJobObjective":{ "type":"structure", @@ -2438,6 +3638,16 @@ "Descending" ] }, + "AutoRollbackConfig":{ + "type":"structure", + "members":{ + "Alarms":{ + "shape":"AlarmList", + "documentation":"

" + } + }, + "documentation":"

Currently, the AutoRollbackConfig API is not supported.

" + }, "AwsManagedHumanLoopRequestSource":{ "type":"string", "enum":[ @@ -2452,10 +3662,43 @@ "SingleRecord" ] }, + "Bias":{ + "type":"structure", + "members":{ + "Report":{ + "shape":"MetricsSource", + "documentation":"

The bias report for a model.

" + } + }, + "documentation":"

Contains bias metrics for a model.

" + }, "BillableTimeInSeconds":{ "type":"integer", "min":1 }, + "BlockedReason":{ + "type":"string", + "max":1024 + }, + "BlueGreenUpdatePolicy":{ + "type":"structure", + "required":["TrafficRoutingConfiguration"], + "members":{ + "TrafficRoutingConfiguration":{ + "shape":"TrafficRoutingConfig", + "documentation":"

" + }, + "TerminationWaitInSeconds":{ + "shape":"TerminationWaitInSeconds", + "documentation":"

" + }, + "MaximumExecutionTimeoutInSeconds":{ + "shape":"MaximumExecutionTimeoutInSeconds", + "documentation":"

" + } + }, + "documentation":"

Currently, the BlueGreenUpdatePolicy API is not supported.

" + }, "Boolean":{"type":"boolean"}, "BooleanOperator":{ "type":"string", @@ -2470,6 +3713,16 @@ "min":1, "pattern":"[^ ~^:?*\\[]+" }, + "CacheHitResult":{ + "type":"structure", + "members":{ + "SourcePipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + } + }, + "documentation":"

Details on the cache hit of a pipeline execution step.

" + }, "CandidateDefinitionNotebookLocation":{ "type":"string", "min":1 @@ -2520,10 +3773,39 @@ "type":"list", "member":{"shape":"AutoMLCandidateStep"} }, - "CaptureContentTypeHeader":{ + "CapacitySize":{ "type":"structure", - "members":{ - "CsvContentTypes":{ + "required":[ + "Type", + "Value" + ], + "members":{ + "Type":{ + "shape":"CapacitySizeType", + "documentation":"

This API is not supported.

" + }, + "Value":{ + "shape":"CapacitySizeValue", + "documentation":"

" + } + }, + "documentation":"

Currently, the CapacitySize API is not supported.

" + }, + "CapacitySizeType":{ + "type":"string", + "enum":[ + "INSTANCE_COUNT", + "CAPACITY_PERCENT" + ] + }, + "CapacitySizeValue":{ + "type":"integer", + "min":1 + }, + "CaptureContentTypeHeader":{ + "type":"structure", + "members":{ + "CsvContentTypes":{ "shape":"CsvContentTypes", "documentation":"

" }, @@ -2565,6 +3847,12 @@ "Stopped" ] }, + "Catalog":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "CategoricalParameterRange":{ "type":"structure", "required":[ @@ -2729,6 +4017,12 @@ "pattern":"[\\w+=/-]+", "sensitive":true }, + "ClientToken":{ + "type":"string", + "max":36, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, "CodeRepositoryArn":{ "type":"string", "max":2048, @@ -2967,8 +4261,8 @@ "CompilerOptions":{ "type":"string", "max":1024, - "min":7, - "pattern":"^\\{.+\\}$" + "min":3, + "pattern":".*" }, "CompressionType":{ "type":"string", @@ -2981,6 +4275,23 @@ "type":"list", "member":{"shape":"CompressionType"} }, + "ConditionOutcome":{ + "type":"string", + "enum":[ + "True", + "False" + ] + }, + "ConditionStepMetadata":{ + "type":"structure", + "members":{ + "Outcome":{ + "shape":"ConditionOutcome", + "documentation":"

The outcome of the Condition step evaluation.

" + } + }, + "documentation":"

Metadata for a Condition step.

" + }, "ConfigKey":{ "type":"string", "max":256, @@ -3039,7 +4350,7 @@ "documentation":"

The environment variables to set in the Docker container. Each key and value in the Environment string to string map can have length of up to 1024. We support up to 16 entries in the map.

" }, "ModelPackageName":{ - "shape":"ArnOrName", + "shape":"VersionedArnOrName", "documentation":"

The name or Amazon Resource Name (ARN) of the model package to use to create the model.

" } }, @@ -3064,7 +4375,7 @@ "ContainerHostname":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "ContainerImage":{ "type":"string", @@ -3090,6 +4401,11 @@ "member":{"shape":"ContentClassifier"}, "max":256 }, + "ContentDigest":{ + "type":"string", + "max":72, + "pattern":"^[Ss][Hh][Aa]256:[0-9a-fA-F]{64}$" + }, "ContentType":{ "type":"string", "max":256, @@ -3099,6 +4415,64 @@ "type":"list", "member":{"shape":"ContentType"} }, + "ContextArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:context/.*" + }, + "ContextSource":{ + "type":"structure", + "required":["SourceUri"], + "members":{ + "SourceUri":{ + "shape":"String2048", + "documentation":"

The URI of the source.

" + }, + "SourceType":{ + "shape":"String256", + "documentation":"

The type of the source.

" + }, + "SourceId":{ + "shape":"String256", + "documentation":"

The ID of the source.

" + } + }, + "documentation":"

A structure describing the source of a context.

" + }, + "ContextSummaries":{ + "type":"list", + "member":{"shape":"ContextSummary"} + }, + "ContextSummary":{ + "type":"structure", + "members":{ + "ContextArn":{ + "shape":"ContextArn", + "documentation":"

The Amazon Resource Name (ARN) of the context.

" + }, + "ContextName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the context.

" + }, + "Source":{ + "shape":"ContextSource", + "documentation":"

The source of the context.

" + }, + "ContextType":{ + "shape":"String256", + "documentation":"

The type of the context.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the context was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the context was last modified.

" + } + }, + "documentation":"

Lists a summary of the properties of a context. A context provides a logical grouping of other entities.

" + }, "ContinuousParameterRange":{ "type":"structure", "required":[ @@ -3150,6 +4524,54 @@ "max":20, "min":0 }, + "CreateActionRequest":{ + "type":"structure", + "required":[ + "ActionName", + "Source", + "ActionType" + ], + "members":{ + "ActionName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the action. Must be unique to your account in an AWS Region.

" + }, + "Source":{ + "shape":"ActionSource", + "documentation":"

The source type, ID, and URI.

" + }, + "ActionType":{ + "shape":"String256", + "documentation":"

The action type.

" + }, + "Description":{ + "shape":"ExperimentDescription", + "documentation":"

The description of the action.

" + }, + "Status":{ + "shape":"ActionStatus", + "documentation":"

The status of the action.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

A list of properties to add to the action.

" + }, + "MetadataProperties":{"shape":"MetadataProperties"}, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to apply to the action.

" + } + } + }, + "CreateActionResponse":{ + "type":"structure", + "members":{ + "ActionArn":{ + "shape":"ActionArn", + "documentation":"

The Amazon Resource Name (ARN) of the action.

" + } + } + }, "CreateAlgorithmInput":{ "type":"structure", "required":[ @@ -3180,6 +4602,10 @@ "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", "documentation":"

Whether to certify the algorithm so that it can be listed in AWS Marketplace.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" } } }, @@ -3193,6 +4619,33 @@ } } }, + "CreateAppImageConfigRequest":{ + "type":"structure", + "required":["AppImageConfigName"], + "members":{ + "AppImageConfigName":{ + "shape":"AppImageConfigName", + "documentation":"

The name of the AppImageConfig. Must be unique to your account.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to apply to the AppImageConfig.

" + }, + "KernelGatewayImageConfig":{ + "shape":"KernelGatewayImageConfig", + "documentation":"

The KernelGatewayImageConfig.

" + } + } + }, + "CreateAppImageConfigResponse":{ + "type":"structure", + "members":{ + "AppImageConfigArn":{ + "shape":"AppImageConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the AppImageConfig.

" + } + } + }, "CreateAppRequest":{ "type":"structure", "required":[ @@ -3233,7 +4686,46 @@ "members":{ "AppArn":{ "shape":"AppArn", - "documentation":"

The App's Amazon Resource Name (ARN).

" + "documentation":"

The Amazon Resource Name (ARN) of the app.

" + } + } + }, + "CreateArtifactRequest":{ + "type":"structure", + "required":[ + "Source", + "ArtifactType" + ], + "members":{ + "ArtifactName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the artifact. Must be unique to your account in an AWS Region.

" + }, + "Source":{ + "shape":"ArtifactSource", + "documentation":"

The ID, ID type, and URI of the source.

" + }, + "ArtifactType":{ + "shape":"String256", + "documentation":"

The artifact type.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

A list of properties to add to the artifact.

" + }, + "MetadataProperties":{"shape":"MetadataProperties"}, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to apply to the artifact.

" + } + } + }, + "CreateArtifactResponse":{ + "type":"structure", + "members":{ + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact.

" } } }, @@ -3308,6 +4800,10 @@ "GitConfig":{ "shape":"GitConfig", "documentation":"

Specifies details about the repository, including the URL where the repository is located, the default branch, and credentials to use to access the repository.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" } } }, @@ -3350,6 +4846,10 @@ "StoppingCondition":{ "shape":"StoppingCondition", "documentation":"

Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" } } }, @@ -3363,6 +4863,49 @@ } } }, + "CreateContextRequest":{ + "type":"structure", + "required":[ + "ContextName", + "Source", + "ContextType" + ], + "members":{ + "ContextName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the context. Must be unique to your account in an AWS Region.

" + }, + "Source":{ + "shape":"ContextSource", + "documentation":"

The source type, ID, and URI.

" + }, + "ContextType":{ + "shape":"String256", + "documentation":"

The context type.

" + }, + "Description":{ + "shape":"ExperimentDescription", + "documentation":"

The description of the context.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

A list of properties to add to the context.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to apply to the context.

" + } + } + }, + "CreateContextResponse":{ + "type":"structure", + "members":{ + "ContextArn":{ + "shape":"ContextArn", + "documentation":"

The Amazon Resource Name (ARN) of the context.
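
For orientation, a minimal sketch of calling the new CreateContext operation through the AWS SDK for Java v2 client generated from this model (the SageMakerClient and builder names follow the SDK's usual code-generation conventions; the context name, type, description, and S3 URI are placeholders):

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.ContextSource;
    import software.amazon.awssdk.services.sagemaker.model.CreateContextRequest;
    import software.amazon.awssdk.services.sagemaker.model.CreateContextResponse;

    public class CreateContextExample {
        public static void main(String[] args) {
            try (SageMakerClient sageMaker = SageMakerClient.create()) {
                // ContextSource requires only SourceUri; SourceType and SourceId are optional.
                CreateContextRequest request = CreateContextRequest.builder()
                        .contextName("my-data-prep-context")         // must be unique per account and Region
                        .contextType("DataPreparation")              // free-form String256
                        .source(ContextSource.builder()
                                .sourceUri("s3://my-bucket/inputs/") // placeholder URI
                                .build())
                        .description("Groups the entities produced by one data-prep run")
                        .build();

                CreateContextResponse response = sageMaker.createContext(request);
                System.out.println("Created context: " + response.contextArn());
            }
        }
    }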

" + } + } + }, "CreateDomainRequest":{ "type":"structure", "required":[ @@ -3397,13 +4940,19 @@ "shape":"TagList", "documentation":"

Tags to associate with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

" }, - "HomeEfsFileSystemKmsKeyId":{ - "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service (KMS) encryption key ID. Encryption with a customer master key (CMK) is not supported.

" - }, "AppNetworkAccessType":{ "shape":"AppNetworkAccessType", "documentation":"

Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access

  • VpcOnly - All Studio traffic is through the specified VPC and subnets

" + }, + "HomeEfsFileSystemKmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

This member is deprecated and replaced with KmsKeyId.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use KmsKeyId instead." + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

SageMaker uses AWS KMS to encrypt the EFS volume attached to the domain with an AWS managed customer master key (CMK) by default. For more control, specify a customer managed CMK.

" } } }, @@ -3438,7 +4987,7 @@ "DataCaptureConfig":{"shape":"DataCaptureConfig"}, "Tags":{ "shape":"TagList", - "documentation":"

A list of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -3465,7 +5014,7 @@ "members":{ "EndpointName":{ "shape":"EndpointName", - "documentation":"

The name of the endpoint. The name must be unique within an AWS Region in your AWS account.

" + "documentation":"

The name of the endpoint. The name must be unique within an AWS Region in your AWS account. The name is case-insensitive in CreateEndpoint, but the case is preserved and must be matched in .

" }, "EndpointConfigName":{ "shape":"EndpointConfigName", @@ -3473,7 +5022,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key-value pairs. For more information, see Using Cost Allocation Tagsin the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" } } }, @@ -3518,6 +5067,63 @@ } } }, + "CreateFeatureGroupRequest":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "RecordIdentifierFeatureName", + "EventTimeFeatureName", + "FeatureDefinitions" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the FeatureGroup. The name must be unique within an AWS Region in an AWS account. The name:

  • Must start and end with an alphanumeric character.

  • Can only contain alphanumeric characters and hyphens. Spaces are not allowed.

" + }, + "RecordIdentifierFeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of the Feature whose value uniquely identifies a Record defined in the FeatureStore. Only the latest record per identifier value will be stored in the OnlineStore. RecordIdentifierFeatureName must be one of feature definitions' names.

You use the RecordIdentifierFeatureName to access data in a FeatureStore.

This name:

  • Must start and end with an alphanumeric character.

  • Can only contain alphanumeric characters, hyphens, and underscores. Spaces are not allowed.

" + }, + "EventTimeFeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of the feature that stores the EventTime of a Record in a FeatureGroup.

An EventTime is a point in time when a new event occurs that corresponds to the creation or update of a Record in a FeatureGroup. All Records in the FeatureGroup must have a corresponding EventTime.

An EventTime can be a String or Fractional.

  • Fractional: EventTime feature values must be a Unix timestamp in seconds.

  • String: EventTime feature values must be an ISO-8601 string. The following formats are supported: yyyy-MM-dd'T'HH:mm:ssZ and yyyy-MM-dd'T'HH:mm:ss.SSSZ, where yyyy, MM, and dd represent the year, month, and day respectively, and HH, mm, ss, and, if applicable, SSS represent the hour, minute, second, and milliseconds respectively. 'T' and Z are constants.

" + }, + "FeatureDefinitions":{ + "shape":"FeatureDefinitions", + "documentation":"

A list of Feature names and types. Name and Type are required for each Feature.

Valid feature FeatureTypes are Integral, Fractional and String.

FeatureNames cannot be any of the following: is_deleted, write_time, api_invocation_time

You can create up to 2,500 FeatureDefinitions per FeatureGroup.

" + }, + "OnlineStoreConfig":{ + "shape":"OnlineStoreConfig", + "documentation":"

You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore flag in OnlineStoreConfig; the default value is False.

You can also include an AWS KMS key ID (KMSKeyId) for at-rest encryption of the OnlineStore.

" + }, + "OfflineStoreConfig":{ + "shape":"OfflineStoreConfig", + "documentation":"

Use this to configure an OfflineFeatureStore. This parameter allows you to specify:

  • The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore.

  • A configuration for an AWS Glue or AWS Hive data catalog.

  • A KMS encryption key to encrypt the Amazon S3 location used for the OfflineStore.

To learn more about this parameter, see OfflineStoreConfig.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A free-form description of a FeatureGroup.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags used to identify Features in each FeatureGroup.

" + } + } + }, + "CreateFeatureGroupResponse":{ + "type":"structure", + "required":["FeatureGroupArn"], + "members":{ + "FeatureGroupArn":{ + "shape":"FeatureGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the FeatureGroup. This is a unique identifier for the feature group.
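
To make the required members above concrete, here is a minimal sketch using the AWS SDK for Java v2 classes generated from these shapes. The FeatureDefinition member names and the FeatureType constants are assumed from the FeatureDefinitions shape referenced above (not reproduced in this hunk); the feature group name, feature names, and types are placeholders, and OnlineStoreConfig is included only because EnableOnlineStore defaults to false.

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.CreateFeatureGroupRequest;
    import software.amazon.awssdk.services.sagemaker.model.FeatureDefinition;
    import software.amazon.awssdk.services.sagemaker.model.FeatureType;
    import software.amazon.awssdk.services.sagemaker.model.OnlineStoreConfig;

    public class CreateFeatureGroupExample {
        public static void main(String[] args) {
            try (SageMakerClient sageMaker = SageMakerClient.create()) {
                CreateFeatureGroupRequest request = CreateFeatureGroupRequest.builder()
                        .featureGroupName("customers")              // alphanumeric characters and hyphens only
                        .recordIdentifierFeatureName("customer_id") // must match one of the feature definitions
                        .eventTimeFeatureName("event_time")         // String or Fractional feature
                        .featureDefinitions(
                                FeatureDefinition.builder()
                                        .featureName("customer_id").featureType(FeatureType.STRING).build(),
                                FeatureDefinition.builder()
                                        .featureName("event_time").featureType(FeatureType.STRING).build(),
                                FeatureDefinition.builder()
                                        .featureName("lifetime_value").featureType(FeatureType.FRACTIONAL).build())
                        .onlineStoreConfig(OnlineStoreConfig.builder()
                                .enableOnlineStore(true)            // the flag defaults to false when omitted
                                .build())
                        .build();

                String arn = sageMaker.createFeatureGroup(request).featureGroupArn();
                System.out.println("FeatureGroup ARN: " + arn);
            }
        }
    }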

" + } + } + }, "CreateFlowDefinitionRequest":{ "type":"structure", "required":[ @@ -3604,7 +5210,7 @@ "members":{ "HyperParameterTuningJobName":{ "shape":"HyperParameterTuningJobName", - "documentation":"

The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same AWS account and AWS Region. The name must have { } to { } characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.

" + "documentation":"

The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same AWS account and AWS Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.

" }, "HyperParameterTuningJobConfig":{ "shape":"HyperParameterTuningJobConfig", @@ -3624,7 +5230,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see AWS Tagging Strategies.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches.

" } } }, @@ -3638,6 +5244,76 @@ } } }, + "CreateImageRequest":{ + "type":"structure", + "required":[ + "ImageName", + "RoleArn" + ], + "members":{ + "Description":{ + "shape":"ImageDescription", + "documentation":"

The description of the image.

" + }, + "DisplayName":{ + "shape":"ImageDisplayName", + "documentation":"

The display name of the image. If not provided, ImageName is displayed.

" + }, + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image. Must be unique to your account.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to apply to the image.

" + } + } + }, + "CreateImageResponse":{ + "type":"structure", + "members":{ + "ImageArn":{ + "shape":"ImageArn", + "documentation":"

The Amazon Resource Name (ARN) of the image.

" + } + } + }, + "CreateImageVersionRequest":{ + "type":"structure", + "required":[ + "BaseImage", + "ClientToken", + "ImageName" + ], + "members":{ + "BaseImage":{ + "shape":"ImageBaseImage", + "documentation":"

The registry path of the container image to use as the starting point for this version. The path is an Amazon Container Registry (ECR) URI in the following format:

<acct-id>.dkr.ecr.<region>.amazonaws.com/<repo-name[:tag] or [@digest]>

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique ID. If not specified, the AWS CLI and AWS SDKs, such as the SDK for Python (Boto3), add a unique value to the call.

", + "idempotencyToken":true + }, + "ImageName":{ + "shape":"ImageName", + "documentation":"

The ImageName of the Image to create a version of.

" + } + } + }, + "CreateImageVersionResponse":{ + "type":"structure", + "members":{ + "ImageVersionArn":{ + "shape":"ImageVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the image version.
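
Because ClientToken is flagged as an idempotency token, the generated SDKs fill it in automatically when it is omitted. The following AWS SDK for Java v2 sketch supplies its own UUID (36 characters of letters, digits, and hyphens, which satisfies the ClientToken pattern) so the same logical call can be retried safely; the image name and ECR path are placeholders.

    import java.util.UUID;

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.CreateImageVersionRequest;
    import software.amazon.awssdk.services.sagemaker.model.CreateImageVersionResponse;

    public class CreateImageVersionExample {
        public static void main(String[] args) {
            try (SageMakerClient sageMaker = SageMakerClient.create()) {
                CreateImageVersionRequest request = CreateImageVersionRequest.builder()
                        .imageName("my-custom-image")   // an existing Image in your account
                        .baseImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest") // placeholder ECR path
                        .clientToken(UUID.randomUUID().toString()) // optional; the SDK generates one if omitted
                        .build();

                CreateImageVersionResponse response = sageMaker.createImageVersion(request);
                System.out.println("Image version ARN: " + response.imageVersionArn());
            }
        }
    }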

" + } + } + }, "CreateLabelingJobRequest":{ "type":"structure", "required":[ @@ -3671,7 +5347,7 @@ }, "LabelCategoryConfigS3Uri":{ "shape":"S3Uri", - "documentation":"

The S3 URL of the file that defines the categories used to label the data objects.

For 3D point cloud task types, see Create a Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.

For all other built-in task types and custom tasks, your label category configuration file must be a JSON file in the following format. Identify the labels you want to use by replacing label_1, label_2,...,label_n with your label categories.

{

\"document-version\": \"2018-11-28\"

\"labels\": [

{

\"label\": \"label_1\"

},

{

\"label\": \"label_2\"

},

...

{

\"label\": \"label_n\"

}

]

}

" + "documentation":"

The S3 URI of the file that defines the categories used to label the data objects.

For 3D point cloud task types, see Create a Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.

For all other built-in task types and custom tasks, your label category configuration file must be a JSON file in the following format. Identify the labels you want to use by replacing label_1, label_2,...,label_n with your label categories.

{

\"document-version\": \"2018-11-28\"

\"labels\": [

{

\"label\": \"label_1\"

},

{

\"label\": \"label_2\"

},

...

{

\"label\": \"label_n\"

}

]

}

" }, "StoppingConditions":{ "shape":"LabelingJobStoppingConditions", @@ -3726,7 +5402,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" }, "VpcConfig":{ "shape":"VpcConfig", @@ -3748,24 +5424,55 @@ } } }, - "CreateModelPackageInput":{ + "CreateModelPackageGroupInput":{ "type":"structure", - "required":["ModelPackageName"], + "required":["ModelPackageGroupName"], "members":{ - "ModelPackageName":{ + "ModelPackageGroupName":{ "shape":"EntityName", - "documentation":"

The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).

" + "documentation":"

The name of the model group.

" }, - "ModelPackageDescription":{ + "ModelPackageGroupDescription":{ "shape":"EntityDescription", - "documentation":"

A description of the model package.

" - }, - "InferenceSpecification":{ - "shape":"InferenceSpecification", - "documentation":"

Specifies details about inference jobs that can be run with models based on this model package, including the following:

  • The Amazon ECR paths of containers that contain the inference code and model artifacts.

  • The instance types that the model package supports for transform jobs and real-time endpoints used for inference.

  • The input and output content formats that the model package supports for inference.

" + "documentation":"

A description for the model group.

" }, - "ValidationSpecification":{ - "shape":"ModelPackageValidationSpecification", + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key value pairs associated with the model group. For more information, see Tagging AWS resources in the AWS General Reference Guide.

" + } + } + }, + "CreateModelPackageGroupOutput":{ + "type":"structure", + "required":["ModelPackageGroupArn"], + "members":{ + "ModelPackageGroupArn":{ + "shape":"ModelPackageGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the model group.

" + } + } + }, + "CreateModelPackageInput":{ + "type":"structure", + "members":{ + "ModelPackageName":{ + "shape":"EntityName", + "documentation":"

The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).

This parameter is required for unversioned models. It is not applicable to versioned models.

" + }, + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The name of the model group that this model version belongs to.

This parameter is required for versioned models, and does not apply to unversioned models.

" + }, + "ModelPackageDescription":{ + "shape":"EntityDescription", + "documentation":"

A description of the model package.

" + }, + "InferenceSpecification":{ + "shape":"InferenceSpecification", + "documentation":"

Specifies details about inference jobs that can be run with models based on this model package, including the following:

  • The Amazon ECR paths of containers that contain the inference code and model artifacts.

  • The instance types that the model package supports for transform jobs and real-time endpoints used for inference.

  • The input and output content formats that the model package supports for inference.

" + }, + "ValidationSpecification":{ + "shape":"ModelPackageValidationSpecification", "documentation":"

Specifies configurations for one or more transform jobs that Amazon SageMaker runs to test the model package.

" }, "SourceAlgorithmSpecification":{ @@ -3774,7 +5481,25 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

Whether to certify the model package for listing on AWS Marketplace.

" + "documentation":"

Whether to certify the model package for listing on AWS Marketplace.

This parameter is optional for unversioned models, and does not apply to versioned models.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key value pairs associated with the model. For more information, see Tagging AWS resources in the AWS General Reference Guide.

" + }, + "ModelApprovalStatus":{ + "shape":"ModelApprovalStatus", + "documentation":"

Whether the model is approved for deployment.

This parameter is optional for versioned models, and does not apply to unversioned models.

For versioned models, the value of this parameter must be set to Approved to deploy the model.

" + }, + "MetadataProperties":{"shape":"MetadataProperties"}, + "ModelMetrics":{ + "shape":"ModelMetrics", + "documentation":"

A structure that contains model metrics reports.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique token that guarantees that the call to this API is idempotent.
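
Pulling the versioned-versus-unversioned notes together: a versioned model package sets ModelPackageGroupName, omits ModelPackageName, and can only be deployed once its ModelApprovalStatus is Approved. The AWS SDK for Java v2 sketch below registers a new version into an existing group; the InferenceSpecification and ModelPackageContainerDefinition member names and the PendingManualApproval enum constant come from parts of the model not reproduced in this hunk and should be treated as assumptions, and every name, image, and S3 URI is a placeholder.

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.CreateModelPackageRequest;
    import software.amazon.awssdk.services.sagemaker.model.InferenceSpecification;
    import software.amazon.awssdk.services.sagemaker.model.ModelApprovalStatus;
    import software.amazon.awssdk.services.sagemaker.model.ModelPackageContainerDefinition;

    public class RegisterModelVersionExample {
        public static void main(String[] args) {
            try (SageMakerClient sageMaker = SageMakerClient.create()) {
                CreateModelPackageRequest request = CreateModelPackageRequest.builder()
                        .modelPackageGroupName("churn-models") // versioned: group name instead of ModelPackageName
                        .modelPackageDescription("XGBoost churn model, October training run")
                        .inferenceSpecification(InferenceSpecification.builder()
                                .containers(ModelPackageContainerDefinition.builder()
                                        .image("123456789012.dkr.ecr.us-east-1.amazonaws.com/churn:1.0")
                                        .modelDataUrl("s3://my-bucket/models/churn/model.tar.gz")
                                        .build())
                                .supportedContentTypes("text/csv")
                                .supportedResponseMIMETypes("text/csv")
                                .build())
                        .modelApprovalStatus(ModelApprovalStatus.PENDING_MANUAL_APPROVAL) // set to Approved before deploying
                        .build();

                String arn = sageMaker.createModelPackage(request).modelPackageArn();
                System.out.println("Registered model package: " + arn);
            }
        }
    }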

", + "idempotencyToken":true } } }, @@ -3853,7 +5578,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to associate with the notebook instance. You can add tags later by using the CreateTags API.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" }, "LifecycleConfigName":{ "shape":"NotebookInstanceLifecycleConfigName", @@ -3921,6 +5646,55 @@ } } }, + "CreatePipelineRequest":{ + "type":"structure", + "required":[ + "PipelineName", + "PipelineDefinition", + "ClientRequestToken", + "RoleArn" + ], + "members":{ + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline.

" + }, + "PipelineDisplayName":{ + "shape":"PipelineName", + "documentation":"

The display name of the pipeline.

" + }, + "PipelineDefinition":{ + "shape":"PipelineDefinition", + "documentation":"

The JSON pipeline definition of the pipeline.

" + }, + "PipelineDescription":{ + "shape":"PipelineDescription", + "documentation":"

A description of the pipeline.

" + }, + "ClientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time.

", + "idempotencyToken":true + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the role used by the pipeline to access and create resources.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to apply to the created pipeline.

" + } + } + }, + "CreatePipelineResponse":{ + "type":"structure", + "members":{ + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the created pipeline.
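
Since PipelineDefinition is a JSON document and ClientRequestToken is an idempotency token, a typical CreatePipeline call from the AWS SDK for Java v2 looks roughly like the sketch below; the definition file path, pipeline names, and role ARN are placeholders, and reading the definition from a local file is just one convenient option.

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.UUID;

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.CreatePipelineRequest;

    public class CreatePipelineExample {
        public static void main(String[] args) throws Exception {
            // The pipeline definition is plain JSON; here it is read from a local file.
            String definitionJson = Files.readString(Paths.get("pipeline-definition.json"));

            try (SageMakerClient sageMaker = SageMakerClient.create()) {
                CreatePipelineRequest request = CreatePipelineRequest.builder()
                        .pipelineName("nightly-training")
                        .pipelineDisplayName("Nightly training pipeline")
                        .pipelineDefinition(definitionJson)
                        .pipelineDescription("Preprocess, train, and register the model")
                        .clientRequestToken(UUID.randomUUID().toString()) // optional; the SDK generates one if omitted
                        .roleArn("arn:aws:iam::123456789012:role/SageMakerPipelineRole")
                        .build();

                System.out.println("Pipeline ARN: " + sageMaker.createPipeline(request).pipelineArn());
            }
        }
    }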

" + } + } + }, "CreatePresignedDomainUrlRequest":{ "type":"structure", "required":[ @@ -3985,7 +5759,7 @@ "members":{ "ProcessingInputs":{ "shape":"ProcessingInputs", - "documentation":"

For each input, data is downloaded from S3 into the processing container before the processing job begins running if \"S3InputMode\" is set to File.

" + "documentation":"

List of input configurations for the processing job.

" }, "ProcessingOutputConfig":{ "shape":"ProcessingOutputConfig", @@ -4036,6 +5810,48 @@ } } }, + "CreateProjectInput":{ + "type":"structure", + "required":[ + "ProjectName", + "ServiceCatalogProvisioningDetails" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectEntityName", + "documentation":"

The name of the project.

" + }, + "ProjectDescription":{ + "shape":"EntityDescription", + "documentation":"

A description for the project.

" + }, + "ServiceCatalogProvisioningDetails":{ + "shape":"ServiceCatalogProvisioningDetails", + "documentation":"

The product ID and provisioning artifact ID to provision a service catalog. For information, see What is AWS Service Catalog.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs that you want to use to organize and track your AWS resource costs. For more information, see Tagging AWS resources in the AWS General Reference Guide.

" + } + } + }, + "CreateProjectOutput":{ + "type":"structure", + "required":[ + "ProjectArn", + "ProjectId" + ], + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

The Amazon Resource Name (ARN) of the project.

" + }, + "ProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the new project.

" + } + } + }, "CreateTrainingJobRequest":{ "type":"structure", "required":[ @@ -4085,7 +5901,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" }, "EnableNetworkIsolation":{ "shape":"Boolean", @@ -4229,6 +6045,7 @@ "shape":"TrialComponentArtifacts", "documentation":"

The output artifacts for the component. Examples of output artifacts are metrics, snapshots, logs, and images.

" }, + "MetadataProperties":{"shape":"MetadataProperties"}, "Tags":{ "shape":"TagList", "documentation":"

A list of tags to associate with the component. You can use Search API to search on the tags.

" @@ -4263,6 +6080,7 @@ "shape":"ExperimentEntityName", "documentation":"

The name of the experiment to associate the trial with.

" }, + "MetadataProperties":{"shape":"MetadataProperties"}, "Tags":{ "shape":"TagList", "documentation":"

A list of tags to associate with the trial. You can use Search API to search on the tags.

" @@ -4409,6 +6227,34 @@ "max":10, "min":1 }, + "CustomImage":{ + "type":"structure", + "required":[ + "ImageName", + "AppImageConfigName" + ], + "members":{ + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the CustomImage. Must be unique to your account.

" + }, + "ImageVersionNumber":{ + "shape":"ImageVersionNumber", + "documentation":"

The version number of the CustomImage.

", + "box":true + }, + "AppImageConfigName":{ + "shape":"AppImageConfigName", + "documentation":"

The name of the AppImageConfig.

" + } + }, + "documentation":"

A custom SageMaker image. For more information, see Bring your own SageMaker image.

" + }, + "CustomImages":{ + "type":"list", + "member":{"shape":"CustomImage"}, + "max":30 + }, "DataCaptureConfig":{ "type":"structure", "required":[ @@ -4477,6 +6323,36 @@ }, "documentation":"

" }, + "DataCatalogConfig":{ + "type":"structure", + "required":[ + "TableName", + "Catalog", + "Database" + ], + "members":{ + "TableName":{ + "shape":"TableName", + "documentation":"

The name of the Glue table.

" + }, + "Catalog":{ + "shape":"Catalog", + "documentation":"

The name of the Glue table catalog.

" + }, + "Database":{ + "shape":"Database", + "documentation":"

The name of the Glue table database.

" + } + }, + "documentation":"

The metadata of the Glue table that serves as the data catalog for the OfflineStore.
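
In practice a DataCatalogConfig is supplied as part of an OfflineStoreConfig when creating a feature group. The sketch below uses the AWS SDK for Java v2 classes generated from these shapes; the S3StorageConfig shape and its s3Uri member are defined elsewhere in this model and are assumed here, and the table, catalog, database, and bucket names are placeholders.

    import software.amazon.awssdk.services.sagemaker.model.DataCatalogConfig;
    import software.amazon.awssdk.services.sagemaker.model.OfflineStoreConfig;
    import software.amazon.awssdk.services.sagemaker.model.S3StorageConfig;

    public class OfflineStoreConfigExample {
        public static void main(String[] args) {
            // DataCatalogConfig requires TableName, Catalog, and Database.
            DataCatalogConfig catalog = DataCatalogConfig.builder()
                    .tableName("customers_feature_group")
                    .catalog("AwsDataCatalog")
                    .database("sagemaker_featurestore")
                    .build();

            OfflineStoreConfig offlineStore = OfflineStoreConfig.builder()
                    .s3StorageConfig(S3StorageConfig.builder()
                            .s3Uri("s3://my-bucket/offline-store/") // placeholder location
                            .build())
                    .dataCatalogConfig(catalog)
                    .build();

            System.out.println(offlineStore);
        }
    }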

" + }, + "DataDistributionType":{ + "type":"string", + "enum":[ + "FullyReplicated", + "ShardedByS3Key" + ] + }, "DataExplorationNotebookLocation":{ "type":"string", "min":1 @@ -4519,6 +6395,32 @@ }, "documentation":"

Describes the location of the channel data.

" }, + "Database":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "DatasetDefinition":{ + "type":"structure", + "members":{ + "AthenaDatasetDefinition":{"shape":"AthenaDatasetDefinition"}, + "RedshiftDatasetDefinition":{"shape":"RedshiftDatasetDefinition"}, + "LocalPath":{ + "shape":"ProcessingLocalPath", + "documentation":"

The local path where you want Amazon SageMaker to download the Dataset Definition inputs to run a processing job. LocalPath is an absolute path to the input data. This is a required parameter when AppManaged is False (default).

" + }, + "DataDistributionType":{ + "shape":"DataDistributionType", + "documentation":"

Whether the generated dataset is FullyReplicated or ShardedByS3Key (default).

" + }, + "InputMode":{ + "shape":"InputMode", + "documentation":"

Whether to use File or Pipe input mode. In File (default) mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume.

" + } + }, + "documentation":"

Configuration for Dataset Definition inputs. The Dataset Definition input must specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition types.

" + }, "DebugHookConfig":{ "type":"structure", "required":["S3OutputPath"], @@ -4618,6 +6520,35 @@ "max":20, "min":0 }, + "DefaultGid":{ + "type":"integer", + "max":65535, + "min":0 + }, + "DefaultUid":{ + "type":"integer", + "max":65535, + "min":0 + }, + "DeleteActionRequest":{ + "type":"structure", + "required":["ActionName"], + "members":{ + "ActionName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the action to delete.

" + } + } + }, + "DeleteActionResponse":{ + "type":"structure", + "members":{ + "ActionArn":{ + "shape":"ActionArn", + "documentation":"

The Amazon Resource Name (ARN) of the action.

" + } + } + }, "DeleteAlgorithmInput":{ "type":"structure", "required":["AlgorithmName"], @@ -4628,6 +6559,16 @@ } } }, + "DeleteAppImageConfigRequest":{ + "type":"structure", + "required":["AppImageConfigName"], + "members":{ + "AppImageConfigName":{ + "shape":"AppImageConfigName", + "documentation":"

The name of the AppImageConfig to delete.

" + } + } + }, "DeleteAppRequest":{ "type":"structure", "required":[ @@ -4655,6 +6596,58 @@ } } }, + "DeleteArtifactRequest":{ + "type":"structure", + "members":{ + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact to delete.

" + }, + "Source":{ + "shape":"ArtifactSource", + "documentation":"

The URI of the source.

" + } + } + }, + "DeleteArtifactResponse":{ + "type":"structure", + "members":{ + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact.

" + } + } + }, + "DeleteAssociationRequest":{ + "type":"structure", + "required":[ + "SourceArn", + "DestinationArn" + ], + "members":{ + "SourceArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The ARN of the source.

" + }, + "DestinationArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The Amazon Resource Name (ARN) of the destination.

" + } + } + }, + "DeleteAssociationResponse":{ + "type":"structure", + "members":{ + "SourceArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The ARN of the source.

" + }, + "DestinationArn":{ + "shape":"AssociationEntityArn", + "documentation":"

The Amazon Resource Name (ARN) of the destination.

" + } + } + }, "DeleteCodeRepositoryInput":{ "type":"structure", "required":["CodeRepositoryName"], @@ -4665,6 +6658,25 @@ } } }, + "DeleteContextRequest":{ + "type":"structure", + "required":["ContextName"], + "members":{ + "ContextName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the context to delete.

" + } + } + }, + "DeleteContextResponse":{ + "type":"structure", + "members":{ + "ContextArn":{ + "shape":"ContextArn", + "documentation":"

The Amazon Resource Name (ARN) of the context.

" + } + } + }, "DeleteDomainRequest":{ "type":"structure", "required":["DomainId"], @@ -4718,6 +6730,16 @@ } } }, + "DeleteFeatureGroupRequest":{ + "type":"structure", + "required":["FeatureGroupName"], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the FeatureGroup you want to delete. The name must be unique within an AWS Region in an AWS account.

" + } + } + }, "DeleteFlowDefinitionRequest":{ "type":"structure", "required":["FlowDefinitionName"], @@ -4748,6 +6770,43 @@ "members":{ } }, + "DeleteImageRequest":{ + "type":"structure", + "required":["ImageName"], + "members":{ + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image to delete.

" + } + } + }, + "DeleteImageResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteImageVersionRequest":{ + "type":"structure", + "required":[ + "ImageName", + "Version" + ], + "members":{ + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image.

" + }, + "Version":{ + "shape":"ImageVersionNumber", + "documentation":"

The version to delete.

" + } + } + }, + "DeleteImageVersionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteModelInput":{ "type":"structure", "required":["ModelName"], @@ -4758,12 +6817,32 @@ } } }, + "DeleteModelPackageGroupInput":{ + "type":"structure", + "required":["ModelPackageGroupName"], + "members":{ + "ModelPackageGroupName":{ + "shape":"ArnOrName", + "documentation":"

The name of the model group to delete.

" + } + } + }, + "DeleteModelPackageGroupPolicyInput":{ + "type":"structure", + "required":["ModelPackageGroupName"], + "members":{ + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The name of the model group for which to delete the policy.

" + } + } + }, "DeleteModelPackageInput":{ "type":"structure", "required":["ModelPackageName"], "members":{ "ModelPackageName":{ - "shape":"EntityName", + "shape":"VersionedArnOrName", "documentation":"

The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).

" } } @@ -4798,6 +6877,43 @@ } } }, + "DeletePipelineRequest":{ + "type":"structure", + "required":[ + "PipelineName", + "ClientRequestToken" + ], + "members":{ + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline to delete.

" + }, + "ClientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time.

", + "idempotencyToken":true + } + } + }, + "DeletePipelineResponse":{ + "type":"structure", + "members":{ + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline to delete.

" + } + } + }, + "DeleteProjectInput":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectEntityName", + "documentation":"

The name of the project to delete.

" + } + } + }, "DeleteTagsInput":{ "type":"structure", "required":[ @@ -4932,21 +7048,90 @@ "type":"list", "member":{"shape":"DeployedImage"} }, - "DescribeAlgorithmInput":{ + "DeploymentConfig":{ "type":"structure", - "required":["AlgorithmName"], + "required":["BlueGreenUpdatePolicy"], "members":{ - "AlgorithmName":{ - "shape":"ArnOrName", - "documentation":"

The name of the algorithm to describe.

" + "BlueGreenUpdatePolicy":{ + "shape":"BlueGreenUpdatePolicy", + "documentation":"

" + }, + "AutoRollbackConfiguration":{ + "shape":"AutoRollbackConfig", + "documentation":"

" + } + }, + "documentation":"

Currently, the DeploymentConfig API is not supported.

" + }, + "DescribeActionRequest":{ + "type":"structure", + "required":["ActionName"], + "members":{ + "ActionName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the action to describe.

" } } }, - "DescribeAlgorithmOutput":{ + "DescribeActionResponse":{ "type":"structure", - "required":[ - "AlgorithmName", - "AlgorithmArn", + "members":{ + "ActionName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the action.

" + }, + "ActionArn":{ + "shape":"ActionArn", + "documentation":"

The Amazon Resource Name (ARN) of the action.

" + }, + "Source":{ + "shape":"ActionSource", + "documentation":"

The source of the action.

" + }, + "ActionType":{ + "shape":"String256", + "documentation":"

The type of the action.

" + }, + "Description":{ + "shape":"ExperimentDescription", + "documentation":"

The description of the action.

" + }, + "Status":{ + "shape":"ActionStatus", + "documentation":"

The status of the action.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

A list of the action's properties.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the action was created.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the action was last modified.

" + }, + "LastModifiedBy":{"shape":"UserContext"}, + "MetadataProperties":{"shape":"MetadataProperties"} + } + }, + "DescribeAlgorithmInput":{ + "type":"structure", + "required":["AlgorithmName"], + "members":{ + "AlgorithmName":{ + "shape":"ArnOrName", + "documentation":"

The name of the algorithm to describe.

" + } + } + }, + "DescribeAlgorithmOutput":{ + "type":"structure", + "required":[ + "AlgorithmName", + "AlgorithmArn", "CreationTime", "TrainingSpecification", "AlgorithmStatus", @@ -4999,6 +7184,41 @@ } } }, + "DescribeAppImageConfigRequest":{ + "type":"structure", + "required":["AppImageConfigName"], + "members":{ + "AppImageConfigName":{ + "shape":"AppImageConfigName", + "documentation":"

The name of the AppImageConfig to describe.

" + } + } + }, + "DescribeAppImageConfigResponse":{ + "type":"structure", + "members":{ + "AppImageConfigArn":{ + "shape":"AppImageConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the AppImageConfig.

" + }, + "AppImageConfigName":{ + "shape":"AppImageConfigName", + "documentation":"

The name of the AppImageConfig.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the AppImageConfig was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the AppImageConfig was last modified.

" + }, + "KernelGatewayImageConfig":{ + "shape":"KernelGatewayImageConfig", + "documentation":"

The configuration of a KernelGateway app.

" + } + } + }, "DescribeAppRequest":{ "type":"structure", "required":[ @@ -5031,7 +7251,7 @@ "members":{ "AppArn":{ "shape":"AppArn", - "documentation":"

The app's Amazon Resource Name (ARN).

" + "documentation":"

The Amazon Resource Name (ARN) of the app.

" }, "AppType":{ "shape":"AppType", @@ -5075,6 +7295,52 @@ } } }, + "DescribeArtifactRequest":{ + "type":"structure", + "required":["ArtifactArn"], + "members":{ + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact to describe.

" + } + } + }, + "DescribeArtifactResponse":{ + "type":"structure", + "members":{ + "ArtifactName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the artifact.

" + }, + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact.

" + }, + "Source":{ + "shape":"ArtifactSource", + "documentation":"

The source of the artifact.

" + }, + "ArtifactType":{ + "shape":"String256", + "documentation":"

The type of the artifact.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

A list of the artifact's properties.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the artifact was created.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the artifact was last modified.

" + }, + "LastModifiedBy":{"shape":"UserContext"}, + "MetadataProperties":{"shape":"MetadataProperties"} + } + }, "DescribeAutoMLJobRequest":{ "type":"structure", "required":["AutoMLJobName"], @@ -5280,6 +7546,10 @@ "shape":"ModelArtifacts", "documentation":"

Information about the location in Amazon S3 that has been configured for storing the model artifacts used in the compilation job.

" }, + "ModelDigests":{ + "shape":"ModelDigests", + "documentation":"

Provides a BLAKE2 hash value that identifies the compiled model artifacts in Amazon S3.

" + }, "RoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) of the model compilation job.

" @@ -5294,6 +7564,55 @@ } } }, + "DescribeContextRequest":{ + "type":"structure", + "required":["ContextName"], + "members":{ + "ContextName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the context to describe.

" + } + } + }, + "DescribeContextResponse":{ + "type":"structure", + "members":{ + "ContextName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the context.

" + }, + "ContextArn":{ + "shape":"ContextArn", + "documentation":"

The Amazon Resource Name (ARN) of the context.

" + }, + "Source":{ + "shape":"ContextSource", + "documentation":"

The source of the context.

" + }, + "ContextType":{ + "shape":"String256", + "documentation":"

The type of the context.

" + }, + "Description":{ + "shape":"ExperimentDescription", + "documentation":"

The description of the context.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

A list of the context's properties.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the context was created.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the context was last modified.

" + }, + "LastModifiedBy":{"shape":"UserContext"} + } + }, "DescribeDomainRequest":{ "type":"structure", "required":["DomainId"], @@ -5349,11 +7668,17 @@ }, "DefaultUserSettings":{ "shape":"UserSettings", - "documentation":"

Settings which are applied to all UserProfile in this domain, if settings are not explicitly specified in a given UserProfile.

" + "documentation":"

Settings which are applied to all UserProfiles in this domain, if settings are not explicitly specified in a given UserProfile.

" + }, + "AppNetworkAccessType":{ + "shape":"AppNetworkAccessType", + "documentation":"

Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access

  • VpcOnly - All Studio traffic is through the specified VPC and subnets

" }, "HomeEfsFileSystemKmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service encryption key ID.

" + "documentation":"

This member is deprecated and replaced with KmsKeyId.

", + "deprecated":true, + "deprecatedMessage":"This property is deprecated, use KmsKeyId instead." }, "SubnetIds":{ "shape":"Subnets", @@ -5367,9 +7692,9 @@ "shape":"VpcId", "documentation":"

The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication.

" }, - "AppNetworkAccessType":{ - "shape":"AppNetworkAccessType", - "documentation":"

Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access

  • VpcOnly - All Studio traffic is through the specified VPC and subnets

" + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS KMS customer managed CMK used to encrypt the EFS volume attached to the domain.

" } } }, @@ -5468,6 +7793,10 @@ "LastModifiedTime":{ "shape":"Timestamp", "documentation":"

A timestamp that shows when the endpoint was last modified.

" + }, + "LastDeploymentConfig":{ + "shape":"DeploymentConfig", + "documentation":"

The most recent deployment configuration for the endpoint.

" } } }, @@ -5522,6 +7851,90 @@ } } }, + "DescribeFeatureGroupRequest":{ + "type":"structure", + "required":["FeatureGroupName"], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the FeatureGroup you want described.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to resume pagination of the list of Features (FeatureDefinitions). 2,500 Features are returned by default.

" + } + } + }, + "DescribeFeatureGroupResponse":{ + "type":"structure", + "required":[ + "FeatureGroupArn", + "FeatureGroupName", + "RecordIdentifierFeatureName", + "EventTimeFeatureName", + "FeatureDefinitions", + "CreationTime", + "NextToken" + ], + "members":{ + "FeatureGroupArn":{ + "shape":"FeatureGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the FeatureGroup.

" + }, + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the FeatureGroup.

" + }, + "RecordIdentifierFeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of the Feature used for RecordIdentifier, whose value uniquely identifies a record stored in the feature store.

" + }, + "EventTimeFeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of the feature that stores the EventTime of a Record in a FeatureGroup.

An EventTime is a point in time when a new event occurs that corresponds to the creation or update of a Record in a FeatureGroup. All Records in the FeatureGroup have a corresponding EventTime.

" + }, + "FeatureDefinitions":{ + "shape":"FeatureDefinitions", + "documentation":"

A list of the Features in the FeatureGroup. Each feature is defined by a FeatureName and FeatureType.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

A timestamp indicating when SageMaker created the FeatureGroup.

" + }, + "OnlineStoreConfig":{ + "shape":"OnlineStoreConfig", + "documentation":"

The configuration for the OnlineStore.

" + }, + "OfflineStoreConfig":{ + "shape":"OfflineStoreConfig", + "documentation":"

The configuration of the OfflineStore, including the S3 location of the OfflineStore, the AWS Glue or AWS Hive data catalog configuration, and the security configuration.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided.

" + }, + "FeatureGroupStatus":{ + "shape":"FeatureGroupStatus", + "documentation":"

The status of the feature group.

" + }, + "OfflineStoreStatus":{ + "shape":"OfflineStoreStatus", + "documentation":"

The status of the OfflineStore. Notifies you if replicating data into the OfflineStore has failed. Returns either: Active or Blocked

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

The reason that the FeatureGroup failed to be replicated in the OfflineStore. This failure can occur because:

  • The FeatureGroup could not be created in the OfflineStore.

  • The FeatureGroup could not be deleted from the OfflineStore.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A free form description of the feature group.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to resume pagination of the list of Features (FeatureDefinitions).
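
Because at most 2,500 FeatureDefinitions are returned per call, callers page through DescribeFeatureGroup with NextToken. A hedged AWS SDK for Java v2 sketch follows; it assumes an empty or absent token signals the last page, and the feature group name is a placeholder.

    import java.util.ArrayList;
    import java.util.List;

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.DescribeFeatureGroupRequest;
    import software.amazon.awssdk.services.sagemaker.model.DescribeFeatureGroupResponse;
    import software.amazon.awssdk.services.sagemaker.model.FeatureDefinition;

    public class ListAllFeatureDefinitions {
        public static void main(String[] args) {
            List<FeatureDefinition> allFeatures = new ArrayList<>();
            String nextToken = null;

            try (SageMakerClient sageMaker = SageMakerClient.create()) {
                do {
                    DescribeFeatureGroupResponse response = sageMaker.describeFeatureGroup(
                            DescribeFeatureGroupRequest.builder()
                                    .featureGroupName("customers") // placeholder
                                    .nextToken(nextToken)          // null on the first call
                                    .build());
                    allFeatures.addAll(response.featureDefinitions());
                    nextToken = response.nextToken();
                } while (nextToken != null && !nextToken.isEmpty());
            }

            System.out.println("Total feature definitions: " + allFeatures.size());
        }
    }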

" + } + } + }, "DescribeFlowDefinitionRequest":{ "type":"structure", "required":["FlowDefinitionName"], @@ -5708,6 +8121,112 @@ } } }, + "DescribeImageRequest":{ + "type":"structure", + "required":["ImageName"], + "members":{ + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image to describe.

" + } + } + }, + "DescribeImageResponse":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the image was created.

" + }, + "Description":{ + "shape":"ImageDescription", + "documentation":"

The description of the image.

" + }, + "DisplayName":{ + "shape":"ImageDisplayName", + "documentation":"

The name of the image as displayed.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

When a create, update, or delete operation fails, the reason for the failure.

" + }, + "ImageArn":{ + "shape":"ImageArn", + "documentation":"

The Amazon Resource Name (ARN) of the image.

" + }, + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image.

" + }, + "ImageStatus":{ + "shape":"ImageStatus", + "documentation":"

The status of the image.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the image was last modified.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

" + } + } + }, + "DescribeImageVersionRequest":{ + "type":"structure", + "required":["ImageName"], + "members":{ + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image.

" + }, + "Version":{ + "shape":"ImageVersionNumber", + "documentation":"

The version of the image. If not specified, the latest version is described.

" + } + } + }, + "DescribeImageVersionResponse":{ + "type":"structure", + "members":{ + "BaseImage":{ + "shape":"ImageBaseImage", + "documentation":"

The registry path of the container image on which this image version is based.

" + }, + "ContainerImage":{ + "shape":"ImageContainerImage", + "documentation":"

The registry path of the container image that contains this image version.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the version was created.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

When a create or delete operation fails, the reason for the failure.

" + }, + "ImageArn":{ + "shape":"ImageArn", + "documentation":"

The Amazon Resource Name (ARN) of the image the version is based on.

" + }, + "ImageVersionArn":{ + "shape":"ImageVersionArn", + "documentation":"

The ARN of the version.

" + }, + "ImageVersionStatus":{ + "shape":"ImageVersionStatus", + "documentation":"

The status of the version.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the version was last modified.

" + }, + "Version":{ + "shape":"ImageVersionNumber", + "documentation":"

The version number.

" + } + } + }, "DescribeLabelingJobRequest":{ "type":"structure", "required":["LabelingJobName"], @@ -5800,7 +8319,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" }, "LabelingJobOutput":{ "shape":"LabelingJobOutput", @@ -5861,12 +8380,55 @@ } } }, + "DescribeModelPackageGroupInput":{ + "type":"structure", + "required":["ModelPackageGroupName"], + "members":{ + "ModelPackageGroupName":{ + "shape":"ArnOrName", + "documentation":"

The name of the model group to describe.

" + } + } + }, + "DescribeModelPackageGroupOutput":{ + "type":"structure", + "required":[ + "ModelPackageGroupName", + "ModelPackageGroupArn", + "CreationTime", + "CreatedBy", + "ModelPackageGroupStatus" + ], + "members":{ + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The name of the model group.

" + }, + "ModelPackageGroupArn":{ + "shape":"ModelPackageGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the model group.

" + }, + "ModelPackageGroupDescription":{ + "shape":"EntityDescription", + "documentation":"

A description of the model group.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time that the model group was created.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "ModelPackageGroupStatus":{ + "shape":"ModelPackageGroupStatus", + "documentation":"

The status of the model group.

" + } + } + }, "DescribeModelPackageInput":{ "type":"structure", "required":["ModelPackageName"], "members":{ "ModelPackageName":{ - "shape":"ArnOrName", + "shape":"VersionedArnOrName", "documentation":"

The name of the model package to describe.

" } } @@ -5885,6 +8447,14 @@ "shape":"EntityName", "documentation":"

The name of the model package being described.

" }, + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

If the model is a versioned model, the name of the model group that the versioned model belongs to.

" + }, + "ModelPackageVersion":{ + "shape":"ModelPackageVersion", + "documentation":"

The version of the model package.

" + }, "ModelPackageArn":{ "shape":"ModelPackageArn", "documentation":"

The Amazon Resource Name (ARN) of the model package.

" @@ -5920,6 +8490,25 @@ "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", "documentation":"

Whether the model package is certified for listing on AWS Marketplace.

" + }, + "ModelApprovalStatus":{ + "shape":"ModelApprovalStatus", + "documentation":"

The approval status of the model package.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "MetadataProperties":{"shape":"MetadataProperties"}, + "ModelMetrics":{ + "shape":"ModelMetrics", + "documentation":"

Metrics for the model.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The last time the model package was modified.

" + }, + "LastModifiedBy":{"shape":"UserContext"}, + "ApprovalDescription":{ + "shape":"ApprovalDescription", + "documentation":"

A description provided for the model approval.

" } } }, @@ -6116,17 +8705,142 @@ } } }, - "DescribeProcessingJobRequest":{ + "DescribePipelineDefinitionForExecutionRequest":{ "type":"structure", - "required":["ProcessingJobName"], + "required":["PipelineExecutionArn"], "members":{ - "ProcessingJobName":{ - "shape":"ProcessingJobName", - "documentation":"

The name of the processing job. The name must be unique within an AWS Region in the AWS account.

" + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" } } }, - "DescribeProcessingJobResponse":{ + "DescribePipelineDefinitionForExecutionResponse":{ + "type":"structure", + "members":{ + "PipelineDefinition":{ + "shape":"PipelineDefinition", + "documentation":"

The JSON pipeline definition.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the pipeline was created.

" + } + } + }, + "DescribePipelineExecutionRequest":{ + "type":"structure", + "required":["PipelineExecutionArn"], + "members":{ + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + } + } + }, + "DescribePipelineExecutionResponse":{ + "type":"structure", + "members":{ + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline.

" + }, + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + }, + "PipelineExecutionDisplayName":{ + "shape":"PipelineExecutionName", + "documentation":"

The display name of the pipeline execution.

" + }, + "PipelineExecutionStatus":{ + "shape":"PipelineExecutionStatus", + "documentation":"

The status of the pipeline execution.

" + }, + "PipelineExecutionDescription":{ + "shape":"PipelineExecutionDescription", + "documentation":"

The description of the pipeline execution.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the pipeline execution was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time when the pipeline execution was last modified.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedBy":{"shape":"UserContext"} + } + }, + "DescribePipelineRequest":{ + "type":"structure", + "required":["PipelineName"], + "members":{ + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline to describe.

" + } + } + }, + "DescribePipelineResponse":{ + "type":"structure", + "members":{ + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline.

" + }, + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline.

" + }, + "PipelineDisplayName":{ + "shape":"PipelineName", + "documentation":"

The display name of the pipeline.

" + }, + "PipelineDefinition":{ + "shape":"PipelineDefinition", + "documentation":"

The JSON pipeline definition.

" + }, + "PipelineDescription":{ + "shape":"PipelineDescription", + "documentation":"

The description of the pipeline.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the pipeline uses to execute.

" + }, + "PipelineStatus":{ + "shape":"PipelineStatus", + "documentation":"

The status of the pipeline.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the pipeline was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time when the pipeline was last modified.

" + }, + "LastRunTime":{ + "shape":"Timestamp", + "documentation":"

The time when the pipeline was last run.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedBy":{"shape":"UserContext"} + } + }, + "DescribeProcessingJobRequest":{ + "type":"structure", + "required":["ProcessingJobName"], + "members":{ + "ProcessingJobName":{ + "shape":"ProcessingJobName", + "documentation":"

The name of the processing job. The name must be unique within an AWS Region in the AWS account.

" + } + } + }, + "DescribeProcessingJobResponse":{ "type":"structure", "required":[ "ProcessingJobName", @@ -6223,6 +8937,62 @@ } } }, + "DescribeProjectInput":{ + "type":"structure", + "required":["ProjectName"], + "members":{ + "ProjectName":{ + "shape":"ProjectEntityName", + "documentation":"

The name of the project to describe.

" + } + } + }, + "DescribeProjectOutput":{ + "type":"structure", + "required":[ + "ProjectArn", + "ProjectName", + "ProjectId", + "ServiceCatalogProvisioningDetails", + "ProjectStatus", + "CreationTime" + ], + "members":{ + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

The Amazon Resource Name (ARN) of the project.

" + }, + "ProjectName":{ + "shape":"ProjectEntityName", + "documentation":"

The name of the project.

" + }, + "ProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the project.

" + }, + "ProjectDescription":{ + "shape":"EntityDescription", + "documentation":"

The description of the project.

" + }, + "ServiceCatalogProvisioningDetails":{ + "shape":"ServiceCatalogProvisioningDetails", + "documentation":"

Information used to provision a service catalog product. For information, see What is AWS Service Catalog.

" + }, + "ServiceCatalogProvisionedProductDetails":{ + "shape":"ServiceCatalogProvisionedProductDetails", + "documentation":"

Information about a provisioned service catalog product.

" + }, + "ProjectStatus":{ + "shape":"ProjectStatus", + "documentation":"

The status of the project.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the project was created.

" + } + } + }, "DescribeSubscribedWorkteamRequest":{ "type":"structure", "required":["WorkteamArn"], @@ -6560,6 +9330,7 @@ "shape":"TrialComponentArtifacts", "documentation":"

The output artifacts of the component.

" }, + "MetadataProperties":{"shape":"MetadataProperties"}, "Metrics":{ "shape":"TrialComponentMetricSummaries", "documentation":"

The metrics for the component.

" @@ -6614,7 +9385,8 @@ "LastModifiedBy":{ "shape":"UserContext", "documentation":"

Who last modified the trial.

" - } + }, + "MetadataProperties":{"shape":"MetadataProperties"} } }, "DescribeUserProfileRequest":{ @@ -6723,6 +9495,10 @@ } } }, + "Description":{ + "type":"string", + "max":128 + }, "DesiredWeightAndCapacity":{ "type":"structure", "required":["VariantName"], @@ -6782,6 +9558,16 @@ "max":4096, "pattern":".*" }, + "DisableSagemakerServicecatalogPortfolioInput":{ + "type":"structure", + "members":{ + } + }, + "DisableSagemakerServicecatalogPortfolioOutput":{ + "type":"structure", + "members":{ + } + }, "DisassociateAdditionalCodeRepositories":{"type":"boolean"}, "DisassociateDefaultCodeRepository":{"type":"boolean"}, "DisassociateNotebookInstanceAcceleratorTypes":{"type":"boolean"}, @@ -6871,7 +9657,7 @@ "DomainName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "DomainStatus":{ "type":"string", @@ -6879,7 +9665,10 @@ "Deleting", "Failed", "InService", - "Pending" + "Pending", + "Updating", + "Update_Failed", + "Delete_Failed" ] }, "DoubleParameterValue":{"type":"double"}, @@ -6889,6 +9678,71 @@ "pattern":"\\d+" }, "EnableCapture":{"type":"boolean"}, + "EnableSagemakerServicecatalogPortfolioInput":{ + "type":"structure", + "members":{ + } + }, + "EnableSagemakerServicecatalogPortfolioOutput":{ + "type":"structure", + "members":{ + } + }, + "Endpoint":{ + "type":"structure", + "required":[ + "EndpointName", + "EndpointArn", + "EndpointConfigName", + "EndpointStatus", + "CreationTime", + "LastModifiedTime" + ], + "members":{ + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

The name of the endpoint.

" + }, + "EndpointArn":{ + "shape":"EndpointArn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint.

" + }, + "EndpointConfigName":{ + "shape":"EndpointConfigName", + "documentation":"

The endpoint configuration associated with the endpoint.

" + }, + "ProductionVariants":{ + "shape":"ProductionVariantSummaryList", + "documentation":"

A list of the production variants hosted on the endpoint. Each production variant is a model.

" + }, + "DataCaptureConfig":{"shape":"DataCaptureConfigSummary"}, + "EndpointStatus":{ + "shape":"EndpointStatus", + "documentation":"

The status of the endpoint.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the endpoint failed, the reason it failed.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time that the endpoint was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The last time the endpoint was modified.

" + }, + "MonitoringSchedules":{ + "shape":"MonitoringScheduleList", + "documentation":"

A list of monitoring schedules for the endpoint. For information about model monitoring, see Amazon SageMaker Model Monitor.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of the tags associated with the endpoint. For more information, see Tagging AWS resources in the AWS General Reference Guide.

" + } + }, + "documentation":"

A hosted endpoint for real-time inference.

" + }, "EndpointArn":{ "type":"string", "max":2048, @@ -6904,7 +9758,7 @@ "EndpointConfigName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "EndpointConfigNameContains":{ "type":"string", @@ -6974,7 +9828,7 @@ "EndpointName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "EndpointNameContains":{ "type":"string", @@ -7048,7 +9902,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" }, "EnvironmentKey":{ "type":"string", @@ -7150,9 +10004,9 @@ }, "ExperimentEntityName":{ "type":"string", - "max":82, + "max":120, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,119}" }, "ExperimentSource":{ "type":"structure", @@ -7205,91 +10059,290 @@ }, "documentation":"

A summary of the properties of an experiment. To get the complete set of properties, call the DescribeExperiment API and provide the ExperimentName.

" }, + "Explainability":{ + "type":"structure", + "members":{ + "Report":{ + "shape":"MetricsSource", + "documentation":"

The explainability report for a model.

" + } + }, + "documentation":"

Contains explainability metrics for a model.

" + }, "FailureReason":{ "type":"string", "max":1024 }, - "FileSystemAccessMode":{ - "type":"string", - "enum":[ - "rw", - "ro" - ] - }, - "FileSystemDataSource":{ + "FeatureDefinition":{ "type":"structure", - "required":[ - "FileSystemId", - "FileSystemAccessMode", - "FileSystemType", - "DirectoryPath" - ], "members":{ - "FileSystemId":{ - "shape":"FileSystemId", - "documentation":"

The file system id.

" - }, - "FileSystemAccessMode":{ - "shape":"FileSystemAccessMode", - "documentation":"

The access mode of the mount of the directory associated with the channel. A directory can be mounted either in ro (read-only) or rw (read-write) mode.

" - }, - "FileSystemType":{ - "shape":"FileSystemType", - "documentation":"

The file system type.

" + "FeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of a feature. The type must be a string. FeatureName cannot be any of the following: is_deleted, write_time, api_invocation_time.

" }, - "DirectoryPath":{ - "shape":"DirectoryPath", - "documentation":"

The full path to the directory to associate with the channel.

" + "FeatureType":{ + "shape":"FeatureType", + "documentation":"

The value type of a feature. Valid values are Integral, Fractional, or String.

" } }, - "documentation":"

Specifies a file system data source for a channel.

" - }, - "FileSystemId":{ - "type":"string", - "min":11, - "pattern":".*" + "documentation":"

A list of features. You must include a FeatureName and a FeatureType. Valid FeatureTypes are Integral, Fractional, and String.

" }, - "FileSystemType":{ - "type":"string", - "enum":[ - "EFS", - "FSxLustre" - ] + "FeatureDefinitions":{ + "type":"list", + "member":{"shape":"FeatureDefinition"}, + "max":2500, + "min":1 }, - "Filter":{ + "FeatureGroup":{ "type":"structure", - "required":["Name"], "members":{ - "Name":{ - "shape":"ResourcePropertyName", - "documentation":"

A resource property name. For example, TrainingJobName. For valid property names, see SearchRecord. You must specify a valid property for the resource.

" + "FeatureGroupArn":{ + "shape":"FeatureGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of a FeatureGroup.

" }, - "Operator":{ - "shape":"Operator", - "documentation":"

A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:

Equals

The value of Name equals Value.

NotEquals

The value of Name doesn't equal Value.

Exists

The Name property exists.

NotExists

The Name property does not exist.

GreaterThan

The value of Name is greater than Value. Not supported for text properties.

GreaterThanOrEqualTo

The value of Name is greater than or equal to Value. Not supported for text properties.

LessThan

The value of Name is less than Value. Not supported for text properties.

LessThanOrEqualTo

The value of Name is less than or equal to Value. Not supported for text properties.

In

The value of Name is one of the comma delimited strings in Value. Only supported for text properties.

Contains

The value of Name contains the string Value. Only supported for text properties.

A SearchExpression can include the Contains operator multiple times when the value of Name is one of the following:

  • Experiment.DisplayName

  • Experiment.ExperimentName

  • Experiment.Tags

  • Trial.DisplayName

  • Trial.TrialName

  • Trial.Tags

  • TrialComponent.DisplayName

  • TrialComponent.TrialComponentName

  • TrialComponent.Tags

  • TrialComponent.InputArtifacts

  • TrialComponent.OutputArtifacts

A SearchExpression can include only one Contains operator for all other values of Name. In these cases, if you include multiple Contains operators in the SearchExpression, the result is the following error message: \"'CONTAINS' operator usage limit of 1 exceeded.\"

" + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the FeatureGroup.

" }, - "Value":{ - "shape":"FilterValue", - "documentation":"

A value used with Name and Operator to determine which resources satisfy the filter's condition. For numerical properties, Value must be an integer or floating-point decimal. For timestamp properties, Value must be an ISO 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS.

" + "RecordIdentifierFeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of the Feature whose value uniquely identifies a Record defined in the FeatureGroup's FeatureDefinitions.

" + }, + "EventTimeFeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of the feature that stores the EventTime of a Record in a FeatureGroup.

An EventTime is a point in time when a new event occurs that corresponds to the creation or update of a Record in the FeatureGroup. All Records in the FeatureGroup must have a corresponding EventTime.

" + }, + "FeatureDefinitions":{ + "shape":"FeatureDefinitions", + "documentation":"

A list of Features. Each Feature must include a FeatureName and a FeatureType.

Valid FeatureTypes are Integral, Fractional and String.

FeatureNames cannot be any of the following: is_deleted, write_time, api_invocation_time.

You can create up to 2,500 FeatureDefinitions per FeatureGroup.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time a FeatureGroup was created.

" + }, + "OnlineStoreConfig":{"shape":"OnlineStoreConfig"}, + "OfflineStoreConfig":{"shape":"OfflineStoreConfig"}, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role used to create the feature group.

" + }, + "FeatureGroupStatus":{ + "shape":"FeatureGroupStatus", + "documentation":"

A FeatureGroup status.

" + }, + "OfflineStoreStatus":{"shape":"OfflineStoreStatus"}, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

The reason that the FeatureGroup failed to be replicated in the OfflineStore. This failure may be due to a failure to create a FeatureGroup in, or delete a FeatureGroup from, the OfflineStore.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A free form description of a FeatureGroup.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags used to define a FeatureGroup.

" } }, - "documentation":"

A conditional statement for a search expression that includes a resource property, a Boolean operator, and a value. Resources that match the statement are returned in the results from the Search API.

If you specify a Value, but not an Operator, Amazon SageMaker uses the equals operator.

In search, there are several property types:

Metrics

To define a metric filter, enter a value using the form \"Metrics.<name>\", where <name> is a metric name. For example, the following filter searches for training jobs with an \"accuracy\" metric greater than \"0.9\":

{

\"Name\": \"Metrics.accuracy\",

\"Operator\": \"GreaterThan\",

\"Value\": \"0.9\"

}

HyperParameters

To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\". Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\" hyperparameter that is less than \"0.5\":

{

\"Name\": \"HyperParameters.learning_rate\",

\"Operator\": \"LessThan\",

\"Value\": \"0.5\"

}

Tags

To define a tag filter, enter a value with the form Tags.<key>.

" + "documentation":"

Amazon SageMaker Feature Store stores features in a collection called a Feature Group. A Feature Group can be visualized as a table that has rows, a unique identifier for each row, and a column for each feature. In principle, a Feature Group is composed of features and values per feature.
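As a hedged illustration of this structure, the sketch below declares per-column FeatureDefinitions for a small Feature Group using the AWS SDK for Java v2 builders that would be generated from these shapes. The feature names are hypothetical, and the class and method names assume the SDK's usual code-generation conventions rather than being part of this model.

import java.util.List;

import software.amazon.awssdk.services.sagemaker.model.FeatureDefinition;
import software.amazon.awssdk.services.sagemaker.model.FeatureType;

public class FeatureDefinitionSketch {
    public static void main(String[] args) {
        // Each column of the Feature Group "table" is one FeatureDefinition.
        List<FeatureDefinition> features = List.of(
                FeatureDefinition.builder()
                        .featureName("customer_id")        // record identifier column (hypothetical)
                        .featureType(FeatureType.STRING)
                        .build(),
                FeatureDefinition.builder()
                        .featureName("event_time")         // EventTime column (hypothetical)
                        .featureType(FeatureType.FRACTIONAL)
                        .build(),
                FeatureDefinition.builder()
                        .featureName("purchase_count")
                        .featureType(FeatureType.INTEGRAL)
                        .build());

        features.forEach(f ->
                System.out.println(f.featureName() + " : " + f.featureTypeAsString()));
    }
}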

" }, - "FilterList":{ - "type":"list", - "member":{"shape":"Filter"}, - "max":20, + "FeatureGroupArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:feature-group/.*" + }, + "FeatureGroupMaxResults":{ + "type":"integer", + "max":100, "min":1 }, - "FilterValue":{ + "FeatureGroupName":{ "type":"string", - "max":1024, + "max":64, "min":1, - "pattern":".+" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" }, - "FinalAutoMLJobObjectiveMetric":{ - "type":"structure", - "required":[ + "FeatureGroupNameContains":{ + "type":"string", + "max":64, + "min":1 + }, + "FeatureGroupSortBy":{ + "type":"string", + "enum":[ + "Name", + "FeatureGroupStatus", + "OfflineStoreStatus", + "CreationTime" + ] + }, + "FeatureGroupSortOrder":{ + "type":"string", + "enum":[ + "Ascending", + "Descending" + ] + }, + "FeatureGroupStatus":{ + "type":"string", + "enum":[ + "Creating", + "Created", + "CreateFailed", + "Deleting", + "DeleteFailed" + ] + }, + "FeatureGroupSummaries":{ + "type":"list", + "member":{"shape":"FeatureGroupSummary"} + }, + "FeatureGroupSummary":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "FeatureGroupArn", + "CreationTime" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the FeatureGroup.

" + }, + "FeatureGroupArn":{ + "shape":"FeatureGroupArn", + "documentation":"

Unique identifier for the FeatureGroup.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp indicating when the FeatureGroup was created.

" + }, + "FeatureGroupStatus":{ + "shape":"FeatureGroupStatus", + "documentation":"

The status of a FeatureGroup. The status can be any of the following: Creating, Created, CreateFailed, Deleting, or DeleteFailed.

" + }, + "OfflineStoreStatus":{ + "shape":"OfflineStoreStatus", + "documentation":"

Notifies you if replicating data into the OfflineStore has failed. Returns either: Active or Blocked.

" + } + }, + "documentation":"

The name, Arn, CreationTime, FeatureGroup values, LastUpdatedTime and EnableOnlineStorage status of a FeatureGroup.

" + }, + "FeatureName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,63}" + }, + "FeatureType":{ + "type":"string", + "enum":[ + "Integral", + "Fractional", + "String" + ] + }, + "FileSystemAccessMode":{ + "type":"string", + "enum":[ + "rw", + "ro" + ] + }, + "FileSystemConfig":{ + "type":"structure", + "members":{ + "MountPath":{ + "shape":"MountPath", + "documentation":"

The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to /home/sagemaker-user.

" + }, + "DefaultUid":{ + "shape":"DefaultUid", + "documentation":"

The default POSIX user ID (UID). If not specified, defaults to 1000.

", + "box":true + }, + "DefaultGid":{ + "shape":"DefaultGid", + "documentation":"

The default POSIX group ID (GID). If not specified, defaults to 100.

", + "box":true + } + }, + "documentation":"

The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.

" + }, + "FileSystemDataSource":{ + "type":"structure", + "required":[ + "FileSystemId", + "FileSystemAccessMode", + "FileSystemType", + "DirectoryPath" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The file system id.

" + }, + "FileSystemAccessMode":{ + "shape":"FileSystemAccessMode", + "documentation":"

The access mode of the mount of the directory associated with the channel. A directory can be mounted either in ro (read-only) or rw (read-write) mode.

" + }, + "FileSystemType":{ + "shape":"FileSystemType", + "documentation":"

The file system type.

" + }, + "DirectoryPath":{ + "shape":"DirectoryPath", + "documentation":"

The full path to the directory to associate with the channel.

" + } + }, + "documentation":"

Specifies a file system data source for a channel.

" + }, + "FileSystemId":{ + "type":"string", + "min":11, + "pattern":".*" + }, + "FileSystemType":{ + "type":"string", + "enum":[ + "EFS", + "FSxLustre" + ] + }, + "Filter":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ResourcePropertyName", + "documentation":"

A resource property name. For example, TrainingJobName. For valid property names, see SearchRecord. You must specify a valid property for the resource.

" + }, + "Operator":{ + "shape":"Operator", + "documentation":"

A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:

Equals

The value of Name equals Value.

NotEquals

The value of Name doesn't equal Value.

Exists

The Name property exists.

NotExists

The Name property does not exist.

GreaterThan

The value of Name is greater than Value. Not supported for text properties.

GreaterThanOrEqualTo

The value of Name is greater than or equal to Value. Not supported for text properties.

LessThan

The value of Name is less than Value. Not supported for text properties.

LessThanOrEqualTo

The value of Name is less than or equal to Value. Not supported for text properties.

In

The value of Name is one of the comma delimited strings in Value. Only supported for text properties.

Contains

The value of Name contains the string Value. Only supported for text properties.

A SearchExpression can include the Contains operator multiple times when the value of Name is one of the following:

  • Experiment.DisplayName

  • Experiment.ExperimentName

  • Experiment.Tags

  • Trial.DisplayName

  • Trial.TrialName

  • Trial.Tags

  • TrialComponent.DisplayName

  • TrialComponent.TrialComponentName

  • TrialComponent.Tags

  • TrialComponent.InputArtifacts

  • TrialComponent.OutputArtifacts

A SearchExpression can include only one Contains operator for all other values of Name. In these cases, if you include multiple Contains operators in the SearchExpression, the result is the following error message: \"'CONTAINS' operator usage limit of 1 exceeded.\"

" + }, + "Value":{ + "shape":"FilterValue", + "documentation":"

A value used with Name and Operator to determine which resources satisfy the filter's condition. For numerical properties, Value must be an integer or floating-point decimal. For timestamp properties, Value must be an ISO 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS.

" + } + }, + "documentation":"

A conditional statement for a search expression that includes a resource property, a Boolean operator, and a value. Resources that match the statement are returned in the results from the Search API.

If you specify a Value, but not an Operator, Amazon SageMaker uses the equals operator.

In search, there are several property types:

Metrics

To define a metric filter, enter a value using the form \"Metrics.<name>\", where <name> is a metric name. For example, the following filter searches for training jobs with an \"accuracy\" metric greater than \"0.9\":

{

\"Name\": \"Metrics.accuracy\",

\"Operator\": \"GreaterThan\",

\"Value\": \"0.9\"

}

HyperParameters

To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\". Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\" hyperparameter that is less than \"0.5\":

{

\"Name\": \"HyperParameters.learning_rate\",

\"Operator\": \"LessThan\",

\"Value\": \"0.5\"

}

Tags

To define a tag filter, enter a value with the form Tags.<key>.
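A hedged sketch of the metric filter example above, expressed with the AWS SDK for Java v2 client that would be generated from this model. The class and method names (SageMakerClient, Filter, SearchExpression, SearchRequest, and the Operator and ResourceType enums) assume the SDK's usual code-generation conventions; this is an illustration, not part of the service documentation.

import software.amazon.awssdk.services.sagemaker.SageMakerClient;
import software.amazon.awssdk.services.sagemaker.model.Filter;
import software.amazon.awssdk.services.sagemaker.model.Operator;
import software.amazon.awssdk.services.sagemaker.model.ResourceType;
import software.amazon.awssdk.services.sagemaker.model.SearchExpression;
import software.amazon.awssdk.services.sagemaker.model.SearchRequest;

public class SearchFilterSketch {
    public static void main(String[] args) {
        try (SageMakerClient sageMaker = SageMakerClient.create()) {
            // Equivalent of the "Metrics.accuracy GreaterThan 0.9" JSON example above.
            Filter accuracyFilter = Filter.builder()
                    .name("Metrics.accuracy")
                    .operator(Operator.GREATER_THAN)
                    .value("0.9")
                    .build();

            SearchRequest request = SearchRequest.builder()
                    .resource(ResourceType.TRAINING_JOB)
                    .searchExpression(SearchExpression.builder()
                            .filters(accuracyFilter)
                            .build())
                    .build();

            // Each result is a SearchRecord wrapping the matched training job.
            sageMaker.search(request).results()
                    .forEach(r -> System.out.println(r.trainingJob().trainingJobName()));
        }
    }
}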

" + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":20, + "min":1 + }, + "FilterValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".+" + }, + "FinalAutoMLJobObjectiveMetric":{ + "type":"structure", + "required":[ "MetricName", "Value" ], @@ -7347,7 +10400,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-z0-9](-*[a-z0-9])*" + "pattern":"^[a-z0-9](-*[a-z0-9]){0,62}" }, "FlowDefinitionOutputConfig":{ "type":"structure", @@ -7457,10 +10510,46 @@ "ONNX", "PYTORCH", "XGBOOST", - "TFLITE" + "TFLITE", + "DARKNET", + "SKLEARN" ] }, "GenerateCandidateDefinitionsOnly":{"type":"boolean"}, + "GetModelPackageGroupPolicyInput":{ + "type":"structure", + "required":["ModelPackageGroupName"], + "members":{ + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The name of the model group for which to get the resource policy.

" + } + } + }, + "GetModelPackageGroupPolicyOutput":{ + "type":"structure", + "required":["ResourcePolicy"], + "members":{ + "ResourcePolicy":{ + "shape":"PolicyString", + "documentation":"

The resource policy for the model group.

" + } + } + }, + "GetSagemakerServicecatalogPortfolioStatusInput":{ + "type":"structure", + "members":{ + } + }, + "GetSagemakerServicecatalogPortfolioStatusOutput":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SagemakerServicecatalogStatus", + "documentation":"

Whether Service Catalog is enabled or disabled in SageMaker.

" + } + } + }, "GetSearchSuggestionsRequest":{ "type":"structure", "required":["Resource"], @@ -7752,6 +10841,11 @@ }, "documentation":"

Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor.

" }, + "HyperParameterKey":{ + "type":"string", + "max":256, + "pattern":".*" + }, "HyperParameterScalingType":{ "type":"string", "enum":[ @@ -7793,7 +10887,7 @@ "documentation":"

Indicates whether this hyperparameter is required.

" }, "DefaultValue":{ - "shape":"ParameterValue", + "shape":"HyperParameterValue", "documentation":"

The default value for this hyperparameter. If a default value is specified, a hyperparameter cannot be required.

" } }, @@ -7873,7 +10967,7 @@ "type":"string", "max":64, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" }, "HyperParameterTrainingJobDefinitions":{ "type":"list", @@ -7989,7 +11083,7 @@ "type":"string", "max":32, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" }, "HyperParameterTuningJobObjective":{ "type":"structure", @@ -8130,18 +11224,79 @@ "TransferLearning" ] }, + "HyperParameterValue":{ + "type":"string", + "max":2500, + "pattern":".*" + }, "HyperParameters":{ "type":"map", - "key":{"shape":"ParameterKey"}, - "value":{"shape":"ParameterValue"}, + "key":{"shape":"HyperParameterKey"}, + "value":{"shape":"HyperParameterValue"}, "max":100, "min":0 }, + "IdempotencyToken":{ + "type":"string", + "max":128, + "min":32 + }, + "Image":{ + "type":"structure", + "required":[ + "CreationTime", + "ImageArn", + "ImageName", + "ImageStatus", + "LastModifiedTime" + ], + "members":{ + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the image was created.

" + }, + "Description":{ + "shape":"ImageDescription", + "documentation":"

The description of the image.

" + }, + "DisplayName":{ + "shape":"ImageDisplayName", + "documentation":"

The name of the image as displayed.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

When a create, update, or delete operation fails, the reason for the failure.

" + }, + "ImageArn":{ + "shape":"ImageArn", + "documentation":"

The Amazon Resource Name (ARN) of the image.

" + }, + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image.

" + }, + "ImageStatus":{ + "shape":"ImageStatus", + "documentation":"

The status of the image.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the image was last modified.

" + } + }, + "documentation":"

A SageMaker image. A SageMaker image represents a set of container images that are derived from a common base container image. Each of these container images is represented by a SageMaker ImageVersion.

" + }, "ImageArn":{ "type":"string", "max":256, "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image/[a-z0-9]([-.]?[a-z0-9])*$" }, + "ImageBaseImage":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, "ImageConfig":{ "type":"structure", "required":["RepositoryAccessMode"], @@ -8153,37 +11308,185 @@ }, "documentation":"

Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC).

" }, + "ImageContainerImage":{ + "type":"string", + "max":255, + "min":1 + }, + "ImageDeleteProperty":{ + "type":"string", + "max":11, + "min":1, + "pattern":"(^DisplayName$)|(^Description$)" + }, + "ImageDeletePropertyList":{ + "type":"list", + "member":{"shape":"ImageDeleteProperty"}, + "max":2 + }, + "ImageDescription":{ + "type":"string", + "max":512, + "min":1, + "pattern":".*" + }, "ImageDigest":{ "type":"string", "max":72, "pattern":"^[Ss][Hh][Aa]256:[0-9a-fA-F]{64}$" }, + "ImageDisplayName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^\\S(.*\\S)?$" + }, + "ImageName":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[a-zA-Z0-9]([-.]?[a-zA-Z0-9]){0,62}$" + }, + "ImageNameContains":{ + "type":"string", + "max":63, + "pattern":"^[a-zA-Z0-9\\-.]+$" + }, + "ImageSortBy":{ + "type":"string", + "enum":[ + "CREATION_TIME", + "LAST_MODIFIED_TIME", + "IMAGE_NAME" + ] + }, + "ImageSortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "ImageStatus":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "CREATE_FAILED", + "UPDATING", + "UPDATE_FAILED", + "DELETING", + "DELETE_FAILED" + ] + }, "ImageUri":{ "type":"string", "max":255, "pattern":".*" }, - "InferenceSpecification":{ + "ImageVersion":{ "type":"structure", "required":[ - "Containers", - "SupportedTransformInstanceTypes", - "SupportedRealtimeInferenceInstanceTypes", - "SupportedContentTypes", - "SupportedResponseMIMETypes" + "CreationTime", + "ImageArn", + "ImageVersionArn", + "ImageVersionStatus", + "LastModifiedTime", + "Version" ], "members":{ - "Containers":{ - "shape":"ModelPackageContainerDefinitionList", - "documentation":"

The Amazon ECR registry path of the Docker image that contains the inference code.

" + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

When the version was created.

" }, - "SupportedTransformInstanceTypes":{ - "shape":"TransformInstanceTypes", - "documentation":"

A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.

" + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

When a create or delete operation fails, the reason for the failure.

" }, - "SupportedRealtimeInferenceInstanceTypes":{ - "shape":"RealtimeInferenceInstanceTypes", - "documentation":"

A list of the instance types that are used to generate inferences in real-time.

" + "ImageArn":{ + "shape":"ImageArn", + "documentation":"

The Amazon Resource Name (ARN) of the image the version is based on.

" + }, + "ImageVersionArn":{ + "shape":"ImageVersionArn", + "documentation":"

The ARN of the version.

" + }, + "ImageVersionStatus":{ + "shape":"ImageVersionStatus", + "documentation":"

The status of the version.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

When the version was last modified.

" + }, + "Version":{ + "shape":"ImageVersionNumber", + "documentation":"

The version number.

" + } + }, + "documentation":"

A version of a SageMaker Image. A version represents an existing container image.

" + }, + "ImageVersionArn":{ + "type":"string", + "max":256, + "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image-version/[a-z0-9]([-.]?[a-z0-9])*/[0-9]+$" + }, + "ImageVersionNumber":{ + "type":"integer", + "min":0 + }, + "ImageVersionSortBy":{ + "type":"string", + "enum":[ + "CREATION_TIME", + "LAST_MODIFIED_TIME", + "VERSION" + ] + }, + "ImageVersionSortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "ImageVersionStatus":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "CREATE_FAILED", + "DELETING", + "DELETE_FAILED" + ] + }, + "ImageVersions":{ + "type":"list", + "member":{"shape":"ImageVersion"} + }, + "Images":{ + "type":"list", + "member":{"shape":"Image"} + }, + "InferenceSpecification":{ + "type":"structure", + "required":[ + "Containers", + "SupportedContentTypes", + "SupportedResponseMIMETypes" + ], + "members":{ + "Containers":{ + "shape":"ModelPackageContainerDefinitionList", + "documentation":"

The Amazon ECR registry path of the Docker image that contains the inference code.

" + }, + "SupportedTransformInstanceTypes":{ + "shape":"TransformInstanceTypes", + "documentation":"

A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.

This parameter is required for unversioned models, and optional for versioned models.

" + }, + "SupportedRealtimeInferenceInstanceTypes":{ + "shape":"RealtimeInferenceInstanceTypes", + "documentation":"

A list of the instance types that are used to generate inferences in real-time.

This parameter is required for unversioned models, and optional for versioned models.

" }, "SupportedContentTypes":{ "shape":"ContentTypes", @@ -8210,7 +11513,7 @@ }, "DataInputConfig":{ "shape":"DataInputConfig", - "documentation":"

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

  • TensorFlow: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"input\":[1,1024,1024,3]}

      • If using the CLI, {\\\"input\\\":[1,1024,1024,3]}

    • Examples for two inputs:

      • If using the console, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}

      • If using the CLI, {\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}

  • KERAS: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, DataInputConfig should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"input_1\":[1,3,224,224]}

      • If using the CLI, {\\\"input_1\\\":[1,3,224,224]}

    • Examples for two inputs:

      • If using the console, {\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]}

      • If using the CLI, {\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}

  • MXNET/ONNX: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"data\":[1,3,1024,1024]}

      • If using the CLI, {\\\"data\\\":[1,3,1024,1024]}

    • Examples for two inputs:

      • If using the console, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}

      • If using the CLI, {\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}

  • PyTorch: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.

    • Examples for one input in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224]}

    • Example for one input in list format: [[1,3,224,224]]

    • Examples for two inputs in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]}

    • Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]

  • XGBOOST: input data name and shape are not needed.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

  • shape: Input shape, for example {\"input_1\": {\"shape\": [1,224,224,3]}}. In addition to static input shapes, CoreML converter supports Flexible input shapes:

    • Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific interval in that dimension, for example: {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}

    • Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate all supported input shapes, for example: {\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}

  • default_shape: Default input shape. You can set a default shape during conversion for both Range Dimension and Enumerated Shapes. For example {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}

  • type: Input type. Allowed values: Image and Tensor. By default, the converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. Image input type requires additional input parameters such as bias and scale.

  • bias: If the input type is an Image, you need to provide the bias vector.

  • scale: If the input type is an Image, you need to provide a scale factor.

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:

  • Tensor type input:

    • \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}

  • Tensor type input without input name (PyTorch):

    • \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]

  • Image type input:

    • \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}

    • \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}

  • Image type input without input name (PyTorch):

    • \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]

    • \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}

" + "documentation":"

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

  • TensorFlow: You must specify the name and shape (NHWC format) of the expected data inputs using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"input\":[1,1024,1024,3]}

      • If using the CLI, {\\\"input\\\":[1,1024,1024,3]}

    • Examples for two inputs:

      • If using the console, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}

      • If using the CLI, {\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}

  • KERAS: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) format, DataInputConfig should be specified in NCHW (channel-first) format. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"input_1\":[1,3,224,224]}

      • If using the CLI, {\\\"input_1\\\":[1,3,224,224]}

    • Examples for two inputs:

      • If using the console, {\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]}

      • If using the CLI, {\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}

  • MXNET/ONNX/DARKNET: You must specify the name and shape (NCHW format) of the expected data inputs in order using a dictionary format for your trained model. The dictionary formats required for the console and CLI are different.

    • Examples for one input:

      • If using the console, {\"data\":[1,3,1024,1024]}

      • If using the CLI, {\\\"data\\\":[1,3,1024,1024]}

    • Examples for two inputs:

      • If using the console, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}

      • If using the CLI, {\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}

  • PyTorch: You can either specify the name and shape (NCHW format) of expected data inputs in order using a dictionary format for your trained model or you can specify the shape only using a list format. The dictionary formats required for the console and CLI are different. The list formats for the console and CLI are the same.

    • Examples for one input in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224]}

    • Example for one input in list format: [[1,3,224,224]]

    • Examples for two inputs in dictionary format:

      • If using the console, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}

      • If using the CLI, {\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]}

    • Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]

  • XGBOOST: input data name and shape are not needed.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

  • shape: Input shape, for example {\"input_1\": {\"shape\": [1,224,224,3]}}. In addition to static input shapes, CoreML converter supports Flexible input shapes:

    • Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific interval in that dimension, for example: {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}

    • Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate all supported input shapes, for example: {\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}

  • default_shape: Default input shape. You can set a default shape during conversion for both Range Dimension and Enumerated Shapes. For example {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}

  • type: Input type. Allowed values: Image and Tensor. By default, the converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. Image input type requires additional input parameters such as bias and scale.

  • bias: If the input type is an Image, you need to provide the bias vector.

  • scale: If the input type is an Image, you need to provide a scale factor.

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:

  • Tensor type input:

    • \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}

  • Tensor type input without input name (PyTorch):

    • \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]

  • Image type input:

    • \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}

    • \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}

  • Image type input without input name (PyTorch):

    • \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]

    • \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}

" }, "Framework":{ "shape":"Framework", @@ -8225,6 +11528,13 @@ "max":20, "min":1 }, + "InputMode":{ + "type":"string", + "enum":[ + "Pipe", + "File" + ] + }, "InputModes":{ "type":"list", "member":{"shape":"TrainingInputMode"}, @@ -8374,20 +11684,68 @@ "members":{ "DefaultResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" + "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.

" } }, - "documentation":"

Jupyter server's app settings.

" + "documentation":"

The JupyterServer app settings.

" + }, + "KernelDisplayName":{ + "type":"string", + "max":1024 }, "KernelGatewayAppSettings":{ "type":"structure", "members":{ "DefaultResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" + "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.

" + }, + "CustomImages":{ + "shape":"CustomImages", + "documentation":"

A list of custom SageMaker images that are configured to run as a KernelGateway app.

" + } + }, + "documentation":"

The KernelGateway app settings.

" + }, + "KernelGatewayImageConfig":{ + "type":"structure", + "required":["KernelSpecs"], + "members":{ + "KernelSpecs":{ + "shape":"KernelSpecs", + "documentation":"

The specification of the Jupyter kernels in the image.

" + }, + "FileSystemConfig":{ + "shape":"FileSystemConfig", + "documentation":"

The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.

" + } + }, + "documentation":"

The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app.

" + }, + "KernelName":{ + "type":"string", + "max":1024 + }, + "KernelSpec":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"KernelName", + "documentation":"

The name of the kernel.

" + }, + "DisplayName":{ + "shape":"KernelDisplayName", + "documentation":"

The display name of the kernel.

" } }, - "documentation":"

The kernel gateway app settings.

" + "documentation":"

The specification of a Jupyter kernel.
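A hedged sketch of how a kernel specification and its file system configuration might be registered through the generated AWS SDK for Java v2 client. It assumes the usual CreateAppImageConfig operation and builder names produced by code generation; the config name and kernel name are hypothetical.

import software.amazon.awssdk.services.sagemaker.SageMakerClient;
import software.amazon.awssdk.services.sagemaker.model.CreateAppImageConfigRequest;
import software.amazon.awssdk.services.sagemaker.model.FileSystemConfig;
import software.amazon.awssdk.services.sagemaker.model.KernelGatewayImageConfig;
import software.amazon.awssdk.services.sagemaker.model.KernelSpec;

public class AppImageConfigSketch {
    public static void main(String[] args) {
        try (SageMakerClient sageMaker = SageMakerClient.create()) {
            KernelGatewayImageConfig imageConfig = KernelGatewayImageConfig.builder()
                    .kernelSpecs(KernelSpec.builder()
                            .name("python3")                   // kernel name inside the image (hypothetical)
                            .displayName("Python 3")
                            .build())
                    .fileSystemConfig(FileSystemConfig.builder()
                            .mountPath("/home/sagemaker-user") // matches the documented default
                            .defaultUid(1000)
                            .defaultGid(100)
                            .build())
                    .build();

            sageMaker.createAppImageConfig(CreateAppImageConfigRequest.builder()
                    .appImageConfigName("my-custom-kernel-config") // hypothetical name
                    .kernelGatewayImageConfig(imageConfig)
                    .build());
        }
    }
}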

" + }, + "KernelSpecs":{ + "type":"list", + "member":{"shape":"KernelSpec"}, + "max":1, + "min":1 }, "KmsKeyId":{ "type":"string", @@ -8398,7 +11756,7 @@ "type":"string", "max":127, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,126}" }, "LabelCounter":{ "type":"integer", @@ -8559,7 +11917,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "LabelingJobOutput":{ "type":"structure", @@ -8723,9 +12081,65 @@ "LambdaFunctionArn":{ "type":"string", "max":2048, - "pattern":"arn:aws[a-z\\-]*:lambda:[a-z]{2}-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + "pattern":"arn:aws[a-z\\-]*:lambda:[a-z0-9\\-]*:[0-9]{12}:function:.*" }, "LastModifiedTime":{"type":"timestamp"}, + "LineageEntityParameters":{ + "type":"map", + "key":{"shape":"StringParameterValue"}, + "value":{"shape":"StringParameterValue"}, + "max":30 + }, + "ListActionsRequest":{ + "type":"structure", + "members":{ + "SourceUri":{ + "shape":"SourceUri", + "documentation":"

A filter that returns only actions with the specified source URI.

" + }, + "ActionType":{ + "shape":"String256", + "documentation":"

A filter that returns only actions of the specified type.

" + }, + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only actions created on or after the specified time.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only actions created on or before the specified time.

" + }, + "SortBy":{ + "shape":"SortActionsBy", + "documentation":"

The property used to sort results. The default value is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order. The default value is Descending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous call to ListActions didn't return the full set of actions, the call returns a token for getting the next set of actions.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of actions to return in the response. The default value is 10.

" + } + } + }, + "ListActionsResponse":{ + "type":"structure", + "members":{ + "ActionSummaries":{ + "shape":"ActionSummaries", + "documentation":"

A list of actions and their properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of actions, if there are any.

" + } + } + }, "ListAlgorithmsInput":{ "type":"structure", "members":{ @@ -8773,6 +12187,60 @@ } } }, + "ListAppImageConfigsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of AppImageConfigs to return in the response. The default value is 10.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous call to ListAppImageConfigs didn't return the full set of AppImageConfigs, the call returns a token for getting the next set of AppImageConfigs.

" + }, + "NameContains":{ + "shape":"AppImageConfigName", + "documentation":"

A filter that returns only AppImageConfigs whose name contains the specified string.

" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only AppImageConfigs created on or before the specified time.

" + }, + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only AppImageConfigs created on or after the specified time.

" + }, + "ModifiedTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only AppImageConfigs modified on or before the specified time.

" + }, + "ModifiedTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only AppImageConfigs modified on or after the specified time.

" + }, + "SortBy":{ + "shape":"AppImageConfigSortKey", + "documentation":"

The property used to sort results. The default value is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order. The default value is Descending.

" + } + } + }, + "ListAppImageConfigsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of AppImageConfigs, if there are any.

" + }, + "AppImageConfigs":{ + "shape":"AppImageConfigList", + "documentation":"

A list of AppImageConfigs and their properties.

" + } + } + }, "ListAppsRequest":{ "type":"structure", "members":{ @@ -8815,6 +12283,118 @@ } } }, + "ListArtifactsRequest":{ + "type":"structure", + "members":{ + "SourceUri":{ + "shape":"SourceUri", + "documentation":"

A filter that returns only artifacts with the specified source URI.

" + }, + "ArtifactType":{ + "shape":"String256", + "documentation":"

A filter that returns only artifacts of the specified type.

" + }, + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only artifacts created on or after the specified time.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only artifacts created on or before the specified time.

" + }, + "SortBy":{ + "shape":"SortArtifactsBy", + "documentation":"

The property used to sort results. The default value is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order. The default value is Descending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous call to ListArtifacts didn't return the full set of artifacts, the call returns a token for getting the next set of artifacts.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of artifacts to return in the response. The default value is 10.

" + } + } + }, + "ListArtifactsResponse":{ + "type":"structure", + "members":{ + "ArtifactSummaries":{ + "shape":"ArtifactSummaries", + "documentation":"

A list of artifacts and their properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of artifacts, if there are any.

" + } + } + }, + "ListAssociationsRequest":{ + "type":"structure", + "members":{ + "SourceArn":{ + "shape":"AssociationEntityArn", + "documentation":"

A filter that returns only associations with the specified source ARN.

" + }, + "DestinationArn":{ + "shape":"AssociationEntityArn", + "documentation":"

A filter that returns only associations with the specified destination Amazon Resource Name (ARN).

" + }, + "SourceType":{ + "shape":"String256", + "documentation":"

A filter that returns only associations with the specified source type.

" + }, + "DestinationType":{ + "shape":"String256", + "documentation":"

A filter that returns only associations with the specified destination type.

" + }, + "AssociationType":{ + "shape":"AssociationEdgeType", + "documentation":"

A filter that returns only associations of the specified type.

" + }, + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only associations created on or after the specified time.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only associations created on or before the specified time.

" + }, + "SortBy":{ + "shape":"SortAssociationsBy", + "documentation":"

The property used to sort results. The default value is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order. The default value is Descending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous call to ListAssociations didn't return the full set of associations, the call returns a token for getting the next set of associations.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of associations to return in the response. The default value is 10.

" + } + } + }, + "ListAssociationsResponse":{ + "type":"structure", + "members":{ + "AssociationSummaries":{ + "shape":"AssociationSummaries", + "documentation":"

A list of associations and their properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of associations, if there are any.

" + } + } + }, "ListAutoMLJobsRequest":{ "type":"structure", "members":{ @@ -9047,29 +12627,79 @@ "Status" ] }, - "ListDomainsRequest":{ + "ListContextsRequest":{ "type":"structure", "members":{ + "SourceUri":{ + "shape":"SourceUri", + "documentation":"

A filter that returns only contexts with the specified source URI.

" + }, + "ContextType":{ + "shape":"String256", + "documentation":"

A filter that returns only contexts of the specified type.

" + }, + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only contexts created on or after the specified time.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only contexts created on or before the specified time.

" + }, + "SortBy":{ + "shape":"SortContextsBy", + "documentation":"

The property used to sort results. The default value is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order. The default value is Descending.

" + }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + "documentation":"

If the previous call to ListContexts didn't return the full set of contexts, the call returns a token for getting the next set of contexts.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

Returns a list up to a specified limit.

" + "documentation":"

The maximum number of contexts to return in the response. The default value is 10.

" } } }, - "ListDomainsResponse":{ + "ListContextsResponse":{ "type":"structure", "members":{ - "Domains":{ - "shape":"DomainList", - "documentation":"

The list of domains.

" + "ContextSummaries":{ + "shape":"ContextSummaries", + "documentation":"

A list of contexts and their properties.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + "documentation":"

A token for getting the next set of contexts, if there are any.

" + } + } + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Returns a list up to a specified limit.

" + } + } + }, + "ListDomainsResponse":{ + "type":"structure", + "members":{ + "Domains":{ + "shape":"DomainList", + "documentation":"

The list of domains.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" } } }, @@ -9221,6 +12851,64 @@ } } }, + "ListFeatureGroupsRequest":{ + "type":"structure", + "members":{ + "NameContains":{ + "shape":"FeatureGroupNameContains", + "documentation":"

A string that partially matches one or more FeatureGroups names. Filters FeatureGroups by name.

" + }, + "FeatureGroupStatusEquals":{ + "shape":"FeatureGroupStatus", + "documentation":"

A FeatureGroup status. Filters by FeatureGroup status.

" + }, + "OfflineStoreStatusEquals":{ + "shape":"OfflineStoreStatusValue", + "documentation":"

An OfflineStore status. Filters by OfflineStore status.

" + }, + "CreationTimeAfter":{ + "shape":"CreationTime", + "documentation":"

Use this parameter to search for FeatureGroups created after a specific date and time.

" + }, + "CreationTimeBefore":{ + "shape":"CreationTime", + "documentation":"

Use this parameter to search for FeatureGroups created before a specific date and time.

" + }, + "SortOrder":{ + "shape":"FeatureGroupSortOrder", + "documentation":"

The order in which feature groups are listed.

" + }, + "SortBy":{ + "shape":"FeatureGroupSortBy", + "documentation":"

The value on which the feature group list is sorted.

" + }, + "MaxResults":{ + "shape":"FeatureGroupMaxResults", + "documentation":"

The maximum number of results returned by ListFeatureGroups.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to resume pagination of ListFeatureGroups results.

" + } + } + }, + "ListFeatureGroupsResponse":{ + "type":"structure", + "required":[ + "FeatureGroupSummaries", + "NextToken" + ], + "members":{ + "FeatureGroupSummaries":{ + "shape":"FeatureGroupSummaries", + "documentation":"

A summary of feature groups.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to resume pagination of ListFeatureGroups results.

" + } + } + }, "ListFlowDefinitionsRequest":{ "type":"structure", "members":{ @@ -9361,6 +13049,115 @@ } } }, + "ListImageVersionsRequest":{ + "type":"structure", + "required":["ImageName"], + "members":{ + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only versions created on or after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only versions created on or before the specified time.

" + }, + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image to list the versions of.

" + }, + "LastModifiedTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only versions modified on or after the specified time.

" + }, + "LastModifiedTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only versions modified on or before the specified time.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of versions to return in the response. The default value is 10.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous call to ListImageVersions didn't return the full set of versions, the call returns a token for getting the next set of versions.

" + }, + "SortBy":{ + "shape":"ImageVersionSortBy", + "documentation":"

The property used to sort results. The default value is CREATION_TIME.

" + }, + "SortOrder":{ + "shape":"ImageVersionSortOrder", + "documentation":"

The sort order. The default value is DESCENDING.

" + } + } + }, + "ListImageVersionsResponse":{ + "type":"structure", + "members":{ + "ImageVersions":{ + "shape":"ImageVersions", + "documentation":"

A list of versions and their properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of versions, if there are any.

" + } + } + }, + "ListImagesRequest":{ + "type":"structure", + "members":{ + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only images created on or after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only images created on or before the specified time.

" + }, + "LastModifiedTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only images modified on or after the specified time.

" + }, + "LastModifiedTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only images modified on or before the specified time.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of images to return in the response. The default value is 10.

" + }, + "NameContains":{ + "shape":"ImageNameContains", + "documentation":"

A filter that returns only images whose name contains the specified string.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous call to ListImages didn't return the full set of images, the call returns a token for getting the next set of images.

" + }, + "SortBy":{ + "shape":"ImageSortBy", + "documentation":"

The property used to sort results. The default value is CREATION_TIME.

" + }, + "SortOrder":{ + "shape":"ImageSortOrder", + "documentation":"

The sort order. The default value is DESCENDING.

" + } + } + }, + "ListImagesResponse":{ + "type":"structure", + "members":{ + "Images":{ + "shape":"Images", + "documentation":"

A list of images and their properties.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of images, if there are any.

" + } + } + }, "ListLabelingJobsForWorkteamRequest":{ "type":"structure", "required":["WorkteamArn"], @@ -9475,6 +13272,57 @@ } } }, + "ListLineageEntityParameterKey":{ + "type":"list", + "member":{"shape":"StringParameterValue"} + }, + "ListModelPackageGroupsInput":{ + "type":"structure", + "members":{ + "CreationTimeAfter":{ + "shape":"CreationTime", + "documentation":"

A filter that returns only model groups created after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"CreationTime", + "documentation":"

A filter that returns only model groups created before the specified time.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response.

" + }, + "NameContains":{ + "shape":"NameContains", + "documentation":"

A string in the model group name. This filter returns only model groups whose name contains the specified string.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListModelPackageGroups request was truncated, the response includes a NextToken. To retrieve the next set of model groups, use the token in the next request.

" + }, + "SortBy":{ + "shape":"ModelPackageGroupSortBy", + "documentation":"

The field to sort results by. The default is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results. The default is Ascending.

" + } + } + }, + "ListModelPackageGroupsOutput":{ + "type":"structure", + "required":["ModelPackageGroupSummaryList"], + "members":{ + "ModelPackageGroupSummaryList":{ + "shape":"ModelPackageGroupSummaryList", + "documentation":"

A list of summaries of the model groups in your AWS account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, SageMaker returns this token. To retrieve the next set of model groups, use it in the subsequent request.

" + } + } + }, "ListModelPackagesInput":{ "type":"structure", "members":{ @@ -9494,6 +13342,18 @@ "shape":"NameContains", "documentation":"

A string in the model package name. This filter returns only model packages whose name contains the specified string.

" }, + "ModelApprovalStatus":{ + "shape":"ModelApprovalStatus", + "documentation":"

A filter that returns only the model packages with the specified approval status.

" + }, + "ModelPackageGroupName":{ + "shape":"ArnOrName", + "documentation":"

A filter that returns only model versions that belong to the specified model group.

" + }, + "ModelPackageType":{ + "shape":"ModelPackageType", + "documentation":"

A filter that returns only the model packages of the specified type. This can be one of the following values.

  • VERSIONED - List only versioned models.

  • UNVERSIONED - List only unversioned models.

  • BOTH - List both versioned and unversioned models.

" + }, "NextToken":{ "shape":"NextToken", "documentation":"

If the response to a previous ListModelPackages request was truncated, the response includes a NextToken. To retrieve the next set of model packages, use the token in the next request.

" @@ -9827,109 +13687,314 @@ } } }, - "ListProcessingJobsRequest":{ + "ListPipelineExecutionStepsRequest":{ "type":"structure", "members":{ - "CreationTimeAfter":{ - "shape":"Timestamp", - "documentation":"

A filter that returns only processing jobs created after the specified time.

" + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" }, - "CreationTimeBefore":{ - "shape":"Timestamp", - "documentation":"

A filter that returns only processing jobs created after the specified time.

" + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListPipelineExecutionSteps request was truncated, the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

" }, - "LastModifiedTimeAfter":{ - "shape":"Timestamp", - "documentation":"

A filter that returns only processing jobs modified after the specified time.

" + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of pipeline execution steps to return in the response.

" }, - "LastModifiedTimeBefore":{ - "shape":"Timestamp", - "documentation":"

A filter that returns only processing jobs modified before the specified time.

" + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results.

" + } + } + }, + "ListPipelineExecutionStepsResponse":{ + "type":"structure", + "members":{ + "PipelineExecutionSteps":{ + "shape":"PipelineExecutionStepList", + "documentation":"

A list of PipelineExecutionStep objects. Each PipelineExecutionStep consists of StepName, StartTime, EndTime, StepStatus, and Metadata. Metadata is an object with properties for each job that contains relevant information about the job created by the step.

" }, - "NameContains":{ - "shape":"String", - "documentation":"

A string in the processing job name. This filter returns only processing jobs whose name contains the specified string.

" + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListPipelineExecutionSteps request was truncated, the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

" + } + } + }, + "ListPipelineExecutionsRequest":{ + "type":"structure", + "required":["PipelineName"], + "members":{ + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline.

" }, - "StatusEquals":{ - "shape":"ProcessingJobStatus", - "documentation":"

A filter that retrieves only processing jobs with a specific status.

" + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns the pipeline executions that were created after a specified time.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns the pipeline executions that were created before a specified time.

" }, "SortBy":{ - "shape":"SortBy", - "documentation":"

The field to sort results by. The default is CreationTime.

" + "shape":"SortPipelineExecutionsBy", + "documentation":"

The field by which to sort results. The default is CreatedTime.

" }, "SortOrder":{ "shape":"SortOrder", - "documentation":"

The sort order for results. The default is Ascending.

" + "documentation":"

The sort order for results.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the result of the previous ListProcessingJobs request was truncated, the response includes a NextToken. To retrieve the next set of processing jobs, use the token in the next request.

" + "documentation":"

If the result of the previous ListPipelineExecutions request was truncated, the response includes a NextToken. To retrieve the next set of pipeline executions, use the token in the next request.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of processing jobs to return in the response.

", - "box":true + "documentation":"

The maximum number of pipeline executions to return in the response.

" } } }, - "ListProcessingJobsResponse":{ + "ListPipelineExecutionsResponse":{ "type":"structure", - "required":["ProcessingJobSummaries"], "members":{ - "ProcessingJobSummaries":{ - "shape":"ProcessingJobSummaries", - "documentation":"

An array of ProcessingJobSummary objects, each listing a processing job.

" + "PipelineExecutionSummaries":{ + "shape":"PipelineExecutionSummaryList", + "documentation":"

Contains a sorted list of pipeline execution summary objects matching the specified filters. Each run summary includes the Amazon Resource Name (ARN) of the pipeline execution, the run date, and the status. This list can be empty.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of processing jobs, use it in the subsequent request.

" + "documentation":"

If the result of the previous ListPipelineExecutions request was truncated, the response includes a NextToken. To retrieve the next set of pipeline executions, use the token in the next request.

" } } }, - "ListSubscribedWorkteamsRequest":{ + "ListPipelineParametersForExecutionRequest":{ "type":"structure", + "required":["PipelineExecutionArn"], "members":{ - "NameContains":{ - "shape":"WorkteamName", - "documentation":"

A string in the work team name. This filter returns only work teams whose name contains the specified string.

" + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the result of the previous ListSubscribedWorkteams request was truncated, the response includes a NextToken. To retrieve the next set of labeling jobs, use the token in the next request.

" + "documentation":"

If the result of the previous ListPipelineParametersForExecution request was truncated, the response includes a NextToken. To retrieve the next set of parameters, use the token in the next request.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of work teams to return in each page of the response.

", - "box":true + "documentation":"

The maximum number of parameters to return in the response.

" } } }, - "ListSubscribedWorkteamsResponse":{ + "ListPipelineParametersForExecutionResponse":{ "type":"structure", - "required":["SubscribedWorkteams"], "members":{ - "SubscribedWorkteams":{ - "shape":"SubscribedWorkteams", - "documentation":"

An array of Workteam objects, each describing a work team.

" + "PipelineParameters":{ + "shape":"ParameterList", + "documentation":"

Contains a list of pipeline parameters. This list can be empty.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of work teams, use it in the subsequent request.

" + "documentation":"

If the result of the previous ListPipelineParametersForExecution request was truncated, the response includes a NextToken. To retrieve the next set of parameters, use the token in the next request.

" } } }, - "ListTagsInput":{ + "ListPipelinesRequest":{ "type":"structure", - "required":["ResourceArn"], "members":{ - "ResourceArn":{ - "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

" + "PipelineNamePrefix":{ + "shape":"PipelineName", + "documentation":"

The prefix of the pipeline name.

" + }, + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns the pipelines that were created after a specified time.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns the pipelines that were created before a specified time.

" + }, + "SortBy":{ + "shape":"SortPipelinesBy", + "documentation":"

The field by which to sort results. The default is CreatedTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

If the response to the previous ListTags request is truncated, Amazon SageMaker returns this token. To retrieve the next set of tags, use it in the subsequent request.

" + "documentation":"

If the result of the previous ListPipelines request was truncated, the response includes a NextToken. To retrieve the next set of pipelines, use the token in the next request.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of pipelines to return in the response.

" + } + } + }, + "ListPipelinesResponse":{ + "type":"structure", + "members":{ + "PipelineSummaries":{ + "shape":"PipelineSummaryList", + "documentation":"

Contains a sorted list of PipelineSummary objects matching the specified filters. Each PipelineSummary consists of PipelineArn, PipelineName, ExperimentName, PipelineDescription, CreationTime, LastModifiedTime, LastRunTime, and RoleArn. This list can be empty.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListPipelines request was truncated, the response includes a NextToken. To retrieve the next set of pipelines, use the token in the next request.

" + } + } + }, + "ListProcessingJobsRequest":{ + "type":"structure", + "members":{ + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only processing jobs created after the specified time.

" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only processing jobs created before the specified time.

" + }, + "LastModifiedTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only processing jobs modified after the specified time.

" + }, + "LastModifiedTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns only processing jobs modified before the specified time.

" + }, + "NameContains":{ + "shape":"String", + "documentation":"

A string in the processing job name. This filter returns only processing jobs whose name contains the specified string.

" + }, + "StatusEquals":{ + "shape":"ProcessingJobStatus", + "documentation":"

A filter that retrieves only processing jobs with a specific status.

" + }, + "SortBy":{ + "shape":"SortBy", + "documentation":"

The field to sort results by. The default is CreationTime.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for results. The default is Ascending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListProcessingJobs request was truncated, the response includes a NextToken. To retrieve the next set of processing jobs, use the token in the next request.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of processing jobs to return in the response.

", + "box":true + } + } + }, + "ListProcessingJobsResponse":{ + "type":"structure", + "required":["ProcessingJobSummaries"], + "members":{ + "ProcessingJobSummaries":{ + "shape":"ProcessingJobSummaries", + "documentation":"

An array of ProcessingJobSummary objects, each listing a processing job.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of processing jobs, use it in the subsequent request.

" + } + } + }, + "ListProjectsInput":{ + "type":"structure", + "members":{ + "CreationTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter that returns the projects that were created after a specified time.

" + }, + "CreationTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter that returns the projects that were created before a specified time.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of projects to return in the response.

" + }, + "NameContains":{ + "shape":"ProjectEntityName", + "documentation":"

A filter that returns the projects whose name contains a specified string.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListProjects request was truncated, the response includes a NextToken. To retrieve the next set of projects, use the token in the next request.

" + }, + "SortBy":{ + "shape":"ProjectSortBy", + "documentation":"

The field by which to sort results. The default is CreationTime.

" + }, + "SortOrder":{ + "shape":"ProjectSortOrder", + "documentation":"

The sort order for results. The default is Ascending.

" + } + } + }, + "ListProjectsOutput":{ + "type":"structure", + "required":["ProjectSummaryList"], + "members":{ + "ProjectSummaryList":{ + "shape":"ProjectSummaryList", + "documentation":"

A list of summaries of projects.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListProjects request was truncated, the response includes a NextToken. To retrieve the next set of projects, use the token in the next request.

" + } + } + }, + "ListSubscribedWorkteamsRequest":{ + "type":"structure", + "members":{ + "NameContains":{ + "shape":"WorkteamName", + "documentation":"

A string in the work team name. This filter returns only work teams whose name contains the specified string.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous ListSubscribedWorkteams request was truncated, the response includes a NextToken. To retrieve the next set of labeling jobs, use the token in the next request.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of work teams to return in each page of the response.

", + "box":true + } + } + }, + "ListSubscribedWorkteamsResponse":{ + "type":"structure", + "required":["SubscribedWorkteams"], + "members":{ + "SubscribedWorkteams":{ + "shape":"SubscribedWorkteams", + "documentation":"

An array of Workteam objects, each describing a work team.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of work teams, use it in the subsequent request.

" + } + } + }, + "ListTagsInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the response to the previous ListTags request is truncated, Amazon SageMaker returns this token. To retrieve the next set of tags, use it in the subsequent request.

" }, "MaxResults":{ "shape":"ListTagsMaxResults", @@ -10417,10 +14482,15 @@ "type":"integer", "min":1 }, + "MaximumExecutionTimeoutInSeconds":{ + "type":"integer", + "max":14400, + "min":600 + }, "MediaType":{ "type":"string", "max":64, - "pattern":"^[\\w]+\\/[\\w+]+$" + "pattern":"^[-\\w]+\\/[-\\w+]+$" }, "MemberDefinition":{ "type":"structure", @@ -10442,6 +14512,33 @@ "max":10, "min":1 }, + "MetadataProperties":{ + "type":"structure", + "members":{ + "CommitId":{ + "shape":"MetadataPropertyValue", + "documentation":"

The commit ID.

" + }, + "Repository":{ + "shape":"MetadataPropertyValue", + "documentation":"

The repository.

" + }, + "GeneratedBy":{ + "shape":"MetadataPropertyValue", + "documentation":"

The entity that generated this entity.

" + }, + "ProjectId":{ + "shape":"MetadataPropertyValue", + "documentation":"

The project ID.

" + } + }, + "documentation":"

Metadata properties of the tracking entity, trial, or trial component.

" + }, + "MetadataPropertyValue":{ + "type":"string", + "max":1024, + "pattern":".*" + }, "MetricData":{ "type":"structure", "members":{ @@ -10497,6 +14594,36 @@ "pattern":".+" }, "MetricValue":{"type":"float"}, + "MetricsSource":{ + "type":"structure", + "required":[ + "ContentType", + "S3Uri" + ], + "members":{ + "ContentType":{ + "shape":"ContentType", + "documentation":"

" + }, + "ContentDigest":{ + "shape":"ContentDigest", + "documentation":"

" + }, + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

" + } + }, + "documentation":"

" + }, + "ModelApprovalStatus":{ + "type":"string", + "enum":[ + "Approved", + "Rejected", + "PendingManualApproval" + ] + }, "ModelArn":{ "type":"string", "max":2048, @@ -10528,6 +14655,52 @@ }, "documentation":"

Configures the timeout and maximum number of retries for processing a transform job invocation.

" }, + "ModelDataQuality":{ + "type":"structure", + "members":{ + "Statistics":{ + "shape":"MetricsSource", + "documentation":"

Data quality statistics for a model.

" + }, + "Constraints":{ + "shape":"MetricsSource", + "documentation":"

Data quality constraints for a model.

" + } + }, + "documentation":"

Data quality constraints and statistics for a model.

" + }, + "ModelDigests":{ + "type":"structure", + "members":{ + "ArtifactDigest":{ + "shape":"ArtifactDigest", + "documentation":"

Provides a hash value that uniquely identifies the stored model artifacts.

" + } + }, + "documentation":"

Provides information to verify the integrity of stored model artifacts.

" + }, + "ModelMetrics":{ + "type":"structure", + "members":{ + "ModelQuality":{ + "shape":"ModelQuality", + "documentation":"

Metrics that measure the quality of a model.

" + }, + "ModelDataQuality":{ + "shape":"ModelDataQuality", + "documentation":"

Metrics that measure the quality of the input data for a model.

" + }, + "Bias":{ + "shape":"Bias", + "documentation":"

Metrics that measure bias in a model.

" + }, + "Explainability":{ + "shape":"Explainability", + "documentation":"

Metrics that help explain a model.

" + } + }, + "documentation":"

Contains metrics captured from a model.

" + }, "ModelName":{ "type":"string", "max":63, @@ -10538,6 +14711,71 @@ "max":63, "pattern":"[a-zA-Z0-9-]+" }, + "ModelPackage":{ + "type":"structure", + "members":{ + "ModelPackageName":{ + "shape":"EntityName", + "documentation":"

The name of the model.

" + }, + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The model group to which the model belongs.

" + }, + "ModelPackageVersion":{ + "shape":"ModelPackageVersion", + "documentation":"

The version number of a versioned model.

" + }, + "ModelPackageArn":{ + "shape":"ModelPackageArn", + "documentation":"

The Amazon Resource Name (ARN) of the model package.

" + }, + "ModelPackageDescription":{ + "shape":"EntityDescription", + "documentation":"

The description of the model package.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time that the model package was created.

" + }, + "InferenceSpecification":{"shape":"InferenceSpecification"}, + "SourceAlgorithmSpecification":{"shape":"SourceAlgorithmSpecification"}, + "ValidationSpecification":{"shape":"ModelPackageValidationSpecification"}, + "ModelPackageStatus":{ + "shape":"ModelPackageStatus", + "documentation":"

The status of the model package. This can be one of the following values.

  • PENDING - The model package is pending being created.

  • IN_PROGRESS - The model package is in the process of being created.

  • COMPLETED - The model package was successfully created.

  • FAILED - The model package failed.

  • DELETING - The model package is in the process of being deleted.

" + }, + "ModelPackageStatusDetails":{"shape":"ModelPackageStatusDetails"}, + "CertifyForMarketplace":{ + "shape":"CertifyForMarketplace", + "documentation":"

Whether the model package is to be certified to be listed on AWS Marketplace. For information about listing model packages on AWS Marketplace, see List Your Algorithm or Model Package on AWS Marketplace.

" + }, + "ModelApprovalStatus":{ + "shape":"ModelApprovalStatus", + "documentation":"

The approval status of the model. This can be one of the following values.

  • APPROVED - The model is approved.

  • REJECTED - The model is rejected.

  • PENDING_MANUAL_APPROVAL - The model is waiting for manual approval.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "MetadataProperties":{"shape":"MetadataProperties"}, + "ModelMetrics":{ + "shape":"ModelMetrics", + "documentation":"

Metrics for the model.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The last time the model package was modified.

" + }, + "LastModifiedBy":{"shape":"UserContext"}, + "ApprovalDescription":{ + "shape":"ApprovalDescription", + "documentation":"

A description provided when the model approval is set.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of the tags associated with the model package. For more information, see Tagging AWS resources in the AWS General Reference Guide.

" + } + }, + "documentation":"

A versioned model that can be deployed for SageMaker inference.

" + }, "ModelPackageArn":{ "type":"string", "max":2048, @@ -10577,19 +14815,110 @@ "max":1, "min":1 }, - "ModelPackageSortBy":{ - "type":"string", - "enum":[ - "Name", - "CreationTime" - ] - }, - "ModelPackageStatus":{ - "type":"string", - "enum":[ - "Pending", - "InProgress", - "Completed", + "ModelPackageGroup":{ + "type":"structure", + "members":{ + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The name of the model group.

" + }, + "ModelPackageGroupArn":{ + "shape":"ModelPackageGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the model group.

" + }, + "ModelPackageGroupDescription":{ + "shape":"EntityDescription", + "documentation":"

The description for the model group.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time that the model group was created.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "ModelPackageGroupStatus":{ + "shape":"ModelPackageGroupStatus", + "documentation":"

The status of the model group. This can be one of the following values.

  • PENDING - The model group is pending being created.

  • IN_PROGRESS - The model group is in the process of being created.

  • COMPLETED - The model group was successfully created.

  • FAILED - The model group failed.

  • DELETING - The model group is in the process of being deleted.

  • DELETE_FAILED - SageMaker failed to delete the model group.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of the tags associated with the model group. For more information, see Tagging AWS resources in the AWS General Reference Guide.

" + } + }, + "documentation":"

A group of versioned models in the model registry.

" + }, + "ModelPackageGroupArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:model-package-group/.*" + }, + "ModelPackageGroupSortBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime" + ] + }, + "ModelPackageGroupStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Completed", + "Failed", + "Deleting", + "DeleteFailed" + ] + }, + "ModelPackageGroupSummary":{ + "type":"structure", + "required":[ + "ModelPackageGroupName", + "ModelPackageGroupArn", + "CreationTime", + "ModelPackageGroupStatus" + ], + "members":{ + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The name of the model group.

" + }, + "ModelPackageGroupArn":{ + "shape":"ModelPackageGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the model group.

" + }, + "ModelPackageGroupDescription":{ + "shape":"EntityDescription", + "documentation":"

A description of the model group.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time that the model group was created.

" + }, + "ModelPackageGroupStatus":{ + "shape":"ModelPackageGroupStatus", + "documentation":"

The status of the model group.

" + } + }, + "documentation":"

Summary information about a model group.

" + }, + "ModelPackageGroupSummaryList":{ + "type":"list", + "member":{"shape":"ModelPackageGroupSummary"} + }, + "ModelPackageSortBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime" + ] + }, + "ModelPackageStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Completed", "Failed", "Deleting" ] @@ -10648,6 +14977,14 @@ "shape":"EntityName", "documentation":"

The name of the model package.

" }, + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

If the model package is a versioned model, the model group that the versioned model belongs to.

" + }, + "ModelPackageVersion":{ + "shape":"ModelPackageVersion", + "documentation":"

If the model package is a versioned model, the version of the model.

" + }, "ModelPackageArn":{ "shape":"ModelPackageArn", "documentation":"

The Amazon Resource Name (ARN) of the model package.

" @@ -10663,6 +15000,10 @@ "ModelPackageStatus":{ "shape":"ModelPackageStatus", "documentation":"

The overall status of the model package.

" + }, + "ModelApprovalStatus":{ + "shape":"ModelApprovalStatus", + "documentation":"

The approval status of the model. This can be one of the following values.

  • APPROVED - The model is approved.

  • REJECTED - The model is rejected.

  • PENDING_MANUAL_APPROVAL - The model is waiting for manual approval.

" } }, "documentation":"

Provides summary information about a model package.

" @@ -10671,6 +15012,14 @@ "type":"list", "member":{"shape":"ModelPackageSummary"} }, + "ModelPackageType":{ + "type":"string", + "enum":[ + "Versioned", + "Unversioned", + "Both" + ] + }, "ModelPackageValidationProfile":{ "type":"structure", "required":[ @@ -10713,6 +15062,24 @@ }, "documentation":"

Specifies batch transform jobs that Amazon SageMaker runs to validate your model package.

" }, + "ModelPackageVersion":{ + "type":"integer", + "min":1 + }, + "ModelQuality":{ + "type":"structure", + "members":{ + "Statistics":{ + "shape":"MetricsSource", + "documentation":"

Model quality statistics.

" + }, + "Constraints":{ + "shape":"MetricsSource", + "documentation":"

Model quality constraints.

" + } + }, + "documentation":"

Model quality statistics and constraints.

" + }, "ModelSortKey":{ "type":"string", "enum":[ @@ -10720,6 +15087,16 @@ "CreationTime" ] }, + "ModelStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String256", + "documentation":"

The Amazon Resource Name (ARN) of the created model.

" + } + }, + "documentation":"

Metadata for Model steps.

" + }, "ModelSummary":{ "type":"structure", "required":[ @@ -11035,6 +15412,46 @@ "max":512, "pattern":"^(https|s3)://([^/]+)/?(.*)$" }, + "MonitoringSchedule":{ + "type":"structure", + "members":{ + "MonitoringScheduleArn":{ + "shape":"MonitoringScheduleArn", + "documentation":"

The Amazon Resource Name (ARN) of the monitoring schedule.

" + }, + "MonitoringScheduleName":{ + "shape":"MonitoringScheduleName", + "documentation":"

The name of the monitoring schedule.

" + }, + "MonitoringScheduleStatus":{ + "shape":"ScheduleStatus", + "documentation":"

The status of the monitoring schedule. This can be one of the following values.

  • PENDING - The schedule is pending being created.

  • FAILED - The schedule failed.

  • SCHEDULED - The schedule was successfully created.

  • STOPPED - The schedule was stopped.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the monitoring schedule failed, the reason it failed.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time that the monitoring schedule was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The last time the monitoring schedule was changed.

" + }, + "MonitoringScheduleConfig":{"shape":"MonitoringScheduleConfig"}, + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

The endpoint that hosts the model being monitored.

" + }, + "LastMonitoringExecutionSummary":{"shape":"MonitoringExecutionSummary"}, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of the tags associated with the monitoring schedule. For more information, see Tagging AWS resources in the AWS General Reference Guide.

" + } + }, + "documentation":"

A schedule for a model monitoring job. For information about model monitor, see Amazon SageMaker Model Monitor.

" + }, "MonitoringScheduleArn":{ "type":"string", "max":256, @@ -11042,7 +15459,6 @@ }, "MonitoringScheduleConfig":{ "type":"structure", - "required":["MonitoringJobDefinition"], "members":{ "ScheduleConfig":{ "shape":"ScheduleConfig", @@ -11055,11 +15471,15 @@ }, "documentation":"

Configures the monitoring schedule and defines the monitoring job.

" }, + "MonitoringScheduleList":{ + "type":"list", + "member":{"shape":"MonitoringSchedule"} + }, "MonitoringScheduleName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" }, "MonitoringScheduleSortKey":{ "type":"string", @@ -11131,6 +15551,11 @@ }, "documentation":"

A time limit for how long the monitoring job is allowed to run before stopping.

" }, + "MountPath":{ + "type":"string", + "max":1024, + "pattern":"^\\/.*" + }, "NameContains":{ "type":"string", "max":63, @@ -11433,6 +15858,48 @@ }, "documentation":"

Specifies the number of training jobs that this hyperparameter tuning job launched, categorized by the status of their objective metric. The objective metric status shows whether the final objective metric for the training job has been evaluated by the tuning job and used in the hyperparameter tuning process.

" }, + "OfflineStoreConfig":{ + "type":"structure", + "required":["S3StorageConfig"], + "members":{ + "S3StorageConfig":{ + "shape":"S3StorageConfig", + "documentation":"

The Amazon Simple Storage Service (Amazon S3) location of OfflineStore.

" + }, + "DisableGlueTableCreation":{ + "shape":"Boolean", + "documentation":"

Set to True to disable the automatic creation of an AWS Glue table when configuring an OfflineStore.

" + }, + "DataCatalogConfig":{ + "shape":"DataCatalogConfig", + "documentation":"

The metadata of the AWS Glue table that is autogenerated when an OfflineStore is created.

" + } + }, + "documentation":"

The configuration of an OfflineStore.

Provide an OfflineStoreConfig in a request to CreateFeatureGroup to create an OfflineStore.

To encrypt an OfflineStore using at-rest data encryption, specify the AWS Key Management Service (KMS) key ID, or KMSKeyId, in S3StorageConfig.

" + }, + "OfflineStoreStatus":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"OfflineStoreStatusValue", + "documentation":"

An OfflineStore status.

" + }, + "BlockedReason":{ + "shape":"BlockedReason", + "documentation":"

The justification for why the OfflineStoreStatus is Blocked (if applicable).

" + } + }, + "documentation":"

The status of OfflineStore.

" + }, + "OfflineStoreStatusValue":{ + "type":"string", + "enum":[ + "Active", + "Blocked", + "Disabled" + ] + }, "OidcConfig":{ "type":"structure", "required":[ @@ -11531,6 +15998,30 @@ }, "documentation":"

A list of user groups that exist in your OIDC Identity Provider (IdP). One to ten groups can be used to create a single private work team. When you add a user group to the list of Groups, you can add that user group to one or more private work teams. If you add a user group to a private work team, all workers in that user group are added to the work team.

" }, + "OnlineStoreConfig":{ + "type":"structure", + "members":{ + "SecurityConfig":{ + "shape":"OnlineStoreSecurityConfig", + "documentation":"

Use to specify KMS Key ID (KMSKeyId) for at-rest encryption of your OnlineStore.

" + }, + "EnableOnlineStore":{ + "shape":"Boolean", + "documentation":"

Turn OnlineStore off by specifying False for the EnableOnlineStore flag. Turn OnlineStore on by specifying True for the EnableOnlineStore flag.

The default value is False.

" + } + }, + "documentation":"

Use this to specify the AWS Key Management Service (KMS) Key ID, or KMSKeyId, for at-rest data encryption. You can turn OnlineStore on or off by specifying the EnableOnlineStore flag; the default value is False.

" + }, + "OnlineStoreSecurityConfig":{ + "type":"structure", + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The ID of the AWS Key Management Service (AWS KMS) key that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.

The caller (either IAM user or IAM role) of CreateFeatureGroup must have the following permissions to the OnlineStore KmsKeyId:

  • \"kms:Encrypt\"

  • \"kms:Decrypt\"

  • \"kms:DescribeKey\"

  • \"kms:CreateGrant\"

  • \"kms:RetireGrant\"

  • \"kms:ReEncryptFrom\"

  • \"kms:ReEncryptTo\"

  • \"kms:GenerateDataKey\"

  • \"kms:ListAliases\"

  • \"kms:ListGrants\"

  • \"kms:RevokeGrant\"

The caller (either IAM user or IAM role) of all DataPlane operations (PutRecord, GetRecord, DeleteRecord) must have the following permissions to the KmsKeyId:

  • \"kms:Decrypt\"

" + } + }, + "documentation":"

The security configuration for OnlineStore.

" + }, "Operator":{ "type":"string", "enum":[ @@ -11577,7 +16068,11 @@ }, "CompilerOptions":{ "shape":"CompilerOptions", - "documentation":"

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

  • CPU: Compilation for CPU supports the following compiler options.

    • mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'}

    • mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']}

  • ARM: Details of ARM CPU compilations.

    • NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors.

      For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support.

  • NVIDIA: Compilation for NVIDIA GPU supports the following compiler options.

    • gpu_code: Specifies the targeted architecture.

    • trt-ver: Specifies the TensorRT versions in x.y.z. format.

    • cuda-ver: Specifies the CUDA version in x.y format.

    For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}

  • ANDROID: Compilation for the Android OS supports the following compiler options:

    • ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}.

    • mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support.

  • CoreML: Compilation for the CoreML OutputConfig$TargetDevice supports the following compiler options:

    • class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {\"class_labels\": \"imagenet_labels_1000.txt\"}. Labels inside the txt file should be separated by newlines.

" + "documentation":"

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

  • CPU: Compilation for CPU supports the following compiler options.

    • mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'}

    • mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']}

  • ARM: Details of ARM CPU compilations.

    • NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors.

      For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support.

  • NVIDIA: Compilation for NVIDIA GPU supports the following compiler options.

    • gpu_code: Specifies the targeted architecture.

    • trt-ver: Specifies the TensorRT versions in x.y.z. format.

    • cuda-ver: Specifies the CUDA version in x.y format.

    For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}

  • ANDROID: Compilation for the Android OS supports the following compiler options:

    • ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}.

    • mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support.

  • INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, \"CompilerOptions\": \"\\\"--verbose 1 --num-neuroncores 2 -O2\\\"\".

    For information about supported compiler options, see Neuron Compiler CLI.

  • CoreML: Compilation for the CoreML OutputConfig$TargetDevice supports the following compiler options:

    • class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {\"class_labels\": \"imagenet_labels_1000.txt\"}. Labels inside the txt file should be separated by newlines.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume after the compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account.

The KmsKeyId can be any of the following formats:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias

" } }, "documentation":"

Contains information about the output location for the compiled model and the target device that the model runs on. TargetDevice and TargetPlatform are mutually exclusive, so you need to choose one between the two to specify your target device or platform. If you cannot find your device you want to use from the TargetDevice list, use TargetPlatform to describe the platform of your edge device and CompilerOptions if there are specific settings that are required or recommended to use for particular TargetPlatform.

" @@ -11602,11 +16097,35 @@ "max":8192, "pattern":".*" }, + "Parameter":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"PipelineParameterName", + "documentation":"

The name of the parameter to assign a value to. This parameter name must match a named parameter in the pipeline definition.

" + }, + "Value":{ + "shape":"String1024", + "documentation":"

The literal value for the parameter.

" + } + }, + "documentation":"

Assigns a value to a named Pipeline parameter.

" + }, "ParameterKey":{ "type":"string", "max":256, "pattern":".*" }, + "ParameterList":{ + "type":"list", + "member":{"shape":"Parameter"}, + "max":50, + "min":0 + }, "ParameterName":{ "type":"string", "max":256, @@ -11668,39 +16187,339 @@ "max":20, "min":1 }, - "Parent":{ + "Parent":{ + "type":"structure", + "members":{ + "TrialName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the trial.

" + }, + "ExperimentName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the experiment.

" + } + }, + "documentation":"

The trial that a trial component is associated with and the experiment the trial is part of. A component might not be associated with a trial. A component can be associated with multiple trials.

" + }, + "ParentHyperParameterTuningJob":{ + "type":"structure", + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job.

" + } + }, + "documentation":"

A previously completed or stopped hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job.

" + }, + "ParentHyperParameterTuningJobs":{ + "type":"list", + "member":{"shape":"ParentHyperParameterTuningJob"}, + "max":5, + "min":1 + }, + "Parents":{ + "type":"list", + "member":{"shape":"Parent"} + }, + "Pipeline":{ + "type":"structure", + "members":{ + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline.

" + }, + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline.

" + }, + "PipelineDisplayName":{ + "shape":"PipelineName", + "documentation":"

The display name of the pipeline.

" + }, + "PipelineDescription":{ + "shape":"PipelineDescription", + "documentation":"

The description of the pipeline.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the role that created the pipeline.

" + }, + "PipelineStatus":{ + "shape":"PipelineStatus", + "documentation":"

The status of the pipeline.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the pipeline.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time that the pipeline was last modified.

" + }, + "LastRunTime":{ + "shape":"Timestamp", + "documentation":"

The time when the pipeline was last run.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedBy":{"shape":"UserContext"}, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags that apply to the pipeline.

" + } + }, + "documentation":"

A SageMaker Model Building Pipeline instance.

" + }, + "PipelineArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:pipeline/.*" + }, + "PipelineDefinition":{ + "type":"string", + "max":1048576, + "min":1, + "pattern":".*(?:[ \\r\\n\\t].*)*" + }, + "PipelineDescription":{ + "type":"string", + "max":3072, + "min":0, + "pattern":".*" + }, + "PipelineExecution":{ + "type":"structure", + "members":{ + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline that was executed.

" + }, + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + }, + "PipelineExecutionDisplayName":{ + "shape":"PipelineExecutionName", + "documentation":"

The display name of the pipeline execution.

" + }, + "PipelineExecutionStatus":{ + "shape":"PipelineExecutionStatus", + "documentation":"

The status of the pipeline execution.

" + }, + "PipelineExecutionDescription":{ + "shape":"PipelineExecutionDescription", + "documentation":"

The description of the pipeline execution.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the pipeline execution.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time that the pipeline execution was last modified.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedBy":{"shape":"UserContext"}, + "PipelineParameters":{ + "shape":"ParameterList", + "documentation":"

Contains a list of pipeline parameters. This list can be empty.

" + } + }, + "documentation":"

An execution of a pipeline.

" + }, + "PipelineExecutionArn":{ + "type":"string", + "max":256, + "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:pipeline\\/.*\\/execution\\/.*$" + }, + "PipelineExecutionDescription":{ + "type":"string", + "max":3072, + "min":0, + "pattern":".*" + }, + "PipelineExecutionName":{ + "type":"string", + "max":82, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,81}" + }, + "PipelineExecutionStatus":{ + "type":"string", + "enum":[ + "Executing", + "Stopping", + "Stopped", + "Failed", + "Succeeded" + ] + }, + "PipelineExecutionStep":{ + "type":"structure", + "members":{ + "StepName":{ + "shape":"StepName", + "documentation":"

The name of the step that is executed.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time that the step started executing.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the step stopped executing.

" + }, + "StepStatus":{ + "shape":"StepStatus", + "documentation":"

The status of the step execution.

" + }, + "CacheHitResult":{ + "shape":"CacheHitResult", + "documentation":"

If this pipeline execution step was cached, details on the cache hit.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

The reason why the step failed execution. This is only returned if the step failed its execution.

" + }, + "Metadata":{ + "shape":"PipelineExecutionStepMetadata", + "documentation":"

The metadata for the step execution.

" + } + }, + "documentation":"

An execution of a step in a pipeline.

" + }, + "PipelineExecutionStepList":{ + "type":"list", + "member":{"shape":"PipelineExecutionStep"}, + "max":100, + "min":0 + }, + "PipelineExecutionStepMetadata":{ + "type":"structure", + "members":{ + "TrainingJob":{ + "shape":"TrainingJobStepMetadata", + "documentation":"

The Amazon Resource Name (ARN) of the training job that was run by this step execution.

" + }, + "ProcessingJob":{ + "shape":"ProcessingJobStepMetadata", + "documentation":"

The Amazon Resource Name (ARN) of the processing job that was run by this step execution.

" + }, + "TransformJob":{ + "shape":"TransformJobStepMetadata", + "documentation":"

The Amazon Resource Name (ARN) of the transform job that was run by this step execution.

" + }, + "Model":{ + "shape":"ModelStepMetadata", + "documentation":"

Metadata for the Model step.

" + }, + "RegisterModel":{ + "shape":"RegisterModelStepMetadata", + "documentation":"

Metadata for the RegisterModel step.

" + }, + "Condition":{ + "shape":"ConditionStepMetadata", + "documentation":"

If this is a Condition step metadata object, details on the condition.

" + } + }, + "documentation":"

Metadata for a step execution.

" + }, + "PipelineExecutionSummary":{ + "type":"structure", + "members":{ + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the pipeline execution.

" + }, + "PipelineExecutionStatus":{ + "shape":"PipelineExecutionStatus", + "documentation":"

The status of the pipeline execution.

" + }, + "PipelineExecutionDescription":{ + "shape":"PipelineExecutionDescription", + "documentation":"

The description of the pipeline execution.

" + }, + "PipelineExecutionDisplayName":{ + "shape":"PipelineExecutionName", + "documentation":"

The display name of the pipeline execution.

" + } + }, + "documentation":"

A pipeline execution summary.

" + }, + "PipelineExecutionSummaryList":{ + "type":"list", + "member":{"shape":"PipelineExecutionSummary"}, + "max":100, + "min":0 + }, + "PipelineName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}" + }, + "PipelineParameterName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$" + }, + "PipelineStatus":{ + "type":"string", + "enum":["Active"] + }, + "PipelineSummary":{ "type":"structure", "members":{ - "TrialName":{ - "shape":"ExperimentEntityName", - "documentation":"

The name of the trial.

" + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline.

" }, - "ExperimentName":{ - "shape":"ExperimentEntityName", - "documentation":"

The name of the experiment.

" - } - }, - "documentation":"

The trial that a trial component is associated with and the experiment the trial is part of. A component might not be associated with a trial. A component can be associated with multiple trials.

" - }, - "ParentHyperParameterTuningJob":{ - "type":"structure", - "members":{ - "HyperParameterTuningJobName":{ - "shape":"HyperParameterTuningJobName", - "documentation":"

The name of the hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job.

" + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline.

" + }, + "PipelineDisplayName":{ + "shape":"PipelineName", + "documentation":"

The display name of the pipeline.

" + }, + "PipelineDescription":{ + "shape":"PipelineDescription", + "documentation":"

The description of the pipeline.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the pipeline uses to execute.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the pipeline.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time that the pipeline was last modified.

" + }, + "LastExecutionTime":{ + "shape":"Timestamp", + "documentation":"

The last time that a pipeline execution began.

" } }, - "documentation":"

A previously completed or stopped hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job.

" + "documentation":"

A summary of a pipeline.

" }, - "ParentHyperParameterTuningJobs":{ + "PipelineSummaryList":{ "type":"list", - "member":{"shape":"ParentHyperParameterTuningJob"}, - "max":5, - "min":1 + "member":{"shape":"PipelineSummary"}, + "max":100, + "min":0 }, - "Parents":{ - "type":"list", - "member":{"shape":"Parent"} + "PolicyString":{ + "type":"string", + "max":20480, + "min":1, + "pattern":".*" }, "PresignedDomainUrl":{"type":"string"}, "ProblemType":{ @@ -11754,23 +16573,39 @@ "max":256, "pattern":"[\\S\\s]*" }, + "ProcessingFeatureStoreOutput":{ + "type":"structure", + "required":["FeatureGroupName"], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the Amazon SageMaker FeatureGroup to use as the destination for processing job output.

" + } + }, + "documentation":"

Configuration for processing job outputs in Amazon SageMaker Feature Store.

" + }, "ProcessingInput":{ "type":"structure", - "required":[ - "InputName", - "S3Input" - ], + "required":["InputName"], "members":{ "InputName":{ "shape":"String", "documentation":"

The name of the inputs for the processing job.

" }, + "AppManaged":{ + "shape":"AppManaged", + "documentation":"

When True, input operations such as data download are managed natively by the processing job application. When False (default), input operations are managed by Amazon SageMaker.

" + }, "S3Input":{ "shape":"ProcessingS3Input", - "documentation":"

The S3 inputs for the processing job.

" + "documentation":"

Configuration for processing job inputs in Amazon S3.

" + }, + "DatasetDefinition":{ + "shape":"DatasetDefinition", + "documentation":"

Configuration for a Dataset Definition input.

" } }, - "documentation":"

The inputs for a processing job.

" + "documentation":"

The inputs for a processing job. The processing input must specify exactly one of either S3Input or DatasetDefinition types.
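As a hedged illustration of this either/or constraint, the sketch below builds a ProcessingInput that selects the S3Input variant. It assumes the Java classes the SDK's standard codegen would produce for the shapes in this model (ProcessingInput, ProcessingS3Input, and the related enums); the input name, bucket, and local path are placeholders.

    import software.amazon.awssdk.services.sagemaker.model.ProcessingInput;
    import software.amazon.awssdk.services.sagemaker.model.ProcessingS3DataType;
    import software.amazon.awssdk.services.sagemaker.model.ProcessingS3Input;
    import software.amazon.awssdk.services.sagemaker.model.ProcessingS3InputMode;

    // Chooses the S3Input variant; a DatasetDefinition would be set instead (never both).
    ProcessingInput input = ProcessingInput.builder()
            .inputName("example-input")                        // required
            .s3Input(ProcessingS3Input.builder()
                    .s3Uri("s3://example-bucket/input/")       // required
                    .s3DataType(ProcessingS3DataType.S3_PREFIX)
                    .localPath("/opt/ml/processing/input")     // required when AppManaged is false
                    .s3InputMode(ProcessingS3InputMode.FILE)   // required when AppManaged is false
                    .build())
            .build();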

" }, "ProcessingInputs":{ "type":"list", @@ -11831,7 +16666,7 @@ "members":{ "ProcessingInputs":{ "shape":"ProcessingInputs", - "documentation":"

For each input, data is downloaded from S3 into the processing container before the processing job begins running if \"S3InputMode\" is set to File.

" + "documentation":"

List of input configurations for the processing job.

" }, "ProcessingOutputConfig":{"shape":"ProcessingOutputConfig"}, "ProcessingJobName":{ @@ -11911,7 +16746,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "ProcessingJobStatus":{ "type":"string", @@ -11923,6 +16758,16 @@ "Stopped" ] }, + "ProcessingJobStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ProcessingJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the processing job.

" + } + }, + "documentation":"

Metadata for a processing job step.

" + }, "ProcessingJobSummaries":{ "type":"list", "member":{"shape":"ProcessingJobSummary"} @@ -11983,10 +16828,7 @@ }, "ProcessingOutput":{ "type":"structure", - "required":[ - "OutputName", - "S3Output" - ], + "required":["OutputName"], "members":{ "OutputName":{ "shape":"String", @@ -11995,9 +16837,17 @@ "S3Output":{ "shape":"ProcessingS3Output", "documentation":"

Configuration for processing job outputs in Amazon S3.

" + }, + "FeatureStoreOutput":{ + "shape":"ProcessingFeatureStoreOutput", + "documentation":"

Configuration for processing job outputs in Amazon SageMaker Feature Store. This processing output type is only supported when AppManaged is specified.

" + }, + "AppManaged":{ + "shape":"AppManaged", + "documentation":"

When True, output operations such as data upload are managed natively by the processing job application. When False (default), output operations are managed by Amazon SageMaker.

" } }, - "documentation":"

Describes the results of a processing job.

" + "documentation":"

Describes the results of a processing job. The processing output must specify exactly one of either S3Output or FeatureStoreOutput types.

" }, "ProcessingOutputConfig":{ "type":"structure", @@ -12005,7 +16855,7 @@ "members":{ "Outputs":{ "shape":"ProcessingOutputs", - "documentation":"

Output configuration information for a processing job.

" + "documentation":"

List of output configurations for the processing job.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -12056,9 +16906,7 @@ "type":"structure", "required":[ "S3Uri", - "LocalPath", - "S3DataType", - "S3InputMode" + "S3DataType" ], "members":{ "S3Uri":{ @@ -12067,7 +16915,7 @@ }, "LocalPath":{ "shape":"ProcessingLocalPath", - "documentation":"

The local path to the Amazon S3 bucket where you want Amazon SageMaker to download the inputs to run a processing job. LocalPath is an absolute path to the input data.

" + "documentation":"

The local path to the Amazon S3 bucket where you want Amazon SageMaker to download the inputs to run a processing job. LocalPath is an absolute path to the input data. This is a required parameter when AppManaged is False (default).

" }, "S3DataType":{ "shape":"ProcessingS3DataType", @@ -12075,7 +16923,7 @@ }, "S3InputMode":{ "shape":"ProcessingS3InputMode", - "documentation":"

Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume.

" + "documentation":"

Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume. This is a required parameter when AppManaged is False (default).

" }, "S3DataDistributionType":{ "shape":"ProcessingS3DataDistributionType", @@ -12086,7 +16934,7 @@ "documentation":"

Whether to use Gzip compression for Amazon S3 storage.

" } }, - "documentation":"

Information about where and how you want to obtain the inputs for an processing job.

" + "documentation":"

Configuration for processing job inputs in Amazon S3.

" }, "ProcessingS3InputMode":{ "type":"string", @@ -12116,7 +16964,7 @@ "documentation":"

Whether to upload the results of the processing job continuously or after the job completes.

" } }, - "documentation":"

Information about where and how you want to store the results of an processing job.

" + "documentation":"

Configuration for processing job outputs in Amazon S3.

" }, "ProcessingS3UploadMode":{ "type":"string", @@ -12310,6 +17158,91 @@ "member":{"shape":"ProductionVariantSummary"}, "min":1 }, + "ProjectArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:project:.*" + }, + "ProjectEntityName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" + }, + "ProjectId":{ + "type":"string", + "max":20, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, + "ProjectSortBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime" + ] + }, + "ProjectSortOrder":{ + "type":"string", + "enum":[ + "Ascending", + "Descending" + ] + }, + "ProjectStatus":{ + "type":"string", + "enum":[ + "Pending", + "CreateInProgress", + "CreateCompleted", + "CreateFailed", + "DeleteInProgress", + "DeleteFailed", + "DeleteCompleted" + ] + }, + "ProjectSummary":{ + "type":"structure", + "required":[ + "ProjectName", + "ProjectArn", + "ProjectId", + "CreationTime", + "ProjectStatus" + ], + "members":{ + "ProjectName":{ + "shape":"ProjectEntityName", + "documentation":"

The name of the project.

" + }, + "ProjectDescription":{ + "shape":"EntityDescription", + "documentation":"

The description of the project.

" + }, + "ProjectArn":{ + "shape":"ProjectArn", + "documentation":"

The Amazon Resource Name (ARN) of the project.

" + }, + "ProjectId":{ + "shape":"ProjectId", + "documentation":"

The ID of the project.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time that the project was created.

" + }, + "ProjectStatus":{ + "shape":"ProjectStatus", + "documentation":"

The status of the project.

" + } + }, + "documentation":"

Information about a project.

" + }, + "ProjectSummaryList":{ + "type":"list", + "member":{"shape":"ProjectSummary"} + }, "PropertyNameHint":{ "type":"string", "max":100, @@ -12341,6 +17274,39 @@ "type":"list", "member":{"shape":"PropertyNameSuggestion"} }, + "ProvisionedProductStatusMessage":{ + "type":"string", + "pattern":".*" + }, + "ProvisioningParameter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"ProvisioningParameterKey", + "documentation":"

The key that identifies a provisioning parameter.

" + }, + "Value":{ + "shape":"ProvisioningParameterValue", + "documentation":"

The value of the provisioning parameter.

" + } + }, + "documentation":"

A key value pair used when you provision a project as a service catalog product. For information, see What is AWS Service Catalog.

" + }, + "ProvisioningParameterKey":{ + "type":"string", + "max":1000, + "min":1, + "pattern":".*" + }, + "ProvisioningParameterValue":{ + "type":"string", + "max":4096, + "pattern":".*" + }, + "ProvisioningParameters":{ + "type":"list", + "member":{"shape":"ProvisioningParameter"} + }, "PublicWorkforceTaskPrice":{ "type":"structure", "members":{ @@ -12351,6 +17317,33 @@ }, "documentation":"

Defines the amount of money paid to an Amazon Mechanical Turk worker for each task performed.

Use one of the following prices for bounding box tasks. Prices are in US dollars and should be based on the complexity of the task; the longer it takes in your initial testing, the more you should offer.

  • 0.036

  • 0.048

  • 0.060

  • 0.072

  • 0.120

  • 0.240

  • 0.360

  • 0.480

  • 0.600

  • 0.720

  • 0.840

  • 0.960

  • 1.080

  • 1.200

Use one of the following prices for image classification, text classification, and custom tasks. Prices are in US dollars.

  • 0.012

  • 0.024

  • 0.036

  • 0.048

  • 0.060

  • 0.072

  • 0.120

  • 0.240

  • 0.360

  • 0.480

  • 0.600

  • 0.720

  • 0.840

  • 0.960

  • 1.080

  • 1.200

Use one of the following prices for semantic segmentation tasks. Prices are in US dollars.

  • 0.840

  • 0.960

  • 1.080

  • 1.200

Use one of the following prices for Textract AnalyzeDocument Important Form Key Amazon Augmented AI review tasks. Prices are in US dollars.

  • 2.400

  • 2.280

  • 2.160

  • 2.040

  • 1.920

  • 1.800

  • 1.680

  • 1.560

  • 1.440

  • 1.320

  • 1.200

  • 1.080

  • 0.960

  • 0.840

  • 0.720

  • 0.600

  • 0.480

  • 0.360

  • 0.240

  • 0.120

  • 0.072

  • 0.060

  • 0.048

  • 0.036

  • 0.024

  • 0.012

Use one of the following prices for Rekognition DetectModerationLabels Amazon Augmented AI review tasks. Prices are in US dollars.

  • 1.200

  • 1.080

  • 0.960

  • 0.840

  • 0.720

  • 0.600

  • 0.480

  • 0.360

  • 0.240

  • 0.120

  • 0.072

  • 0.060

  • 0.048

  • 0.036

  • 0.024

  • 0.012

Use one of the following prices for Amazon Augmented AI custom human review tasks. Prices are in US dollars.

  • 1.200

  • 1.080

  • 0.960

  • 0.840

  • 0.720

  • 0.600

  • 0.480

  • 0.360

  • 0.240

  • 0.120

  • 0.072

  • 0.060

  • 0.048

  • 0.036

  • 0.024

  • 0.012

" }, + "PutModelPackageGroupPolicyInput":{ + "type":"structure", + "required":[ + "ModelPackageGroupName", + "ResourcePolicy" + ], + "members":{ + "ModelPackageGroupName":{ + "shape":"EntityName", + "documentation":"

The name of the model group to add a resource policy to.

" + }, + "ResourcePolicy":{ + "shape":"PolicyString", + "documentation":"

The resource policy for the model group.

" + } + } + }, + "PutModelPackageGroupPolicyOutput":{ + "type":"structure", + "required":["ModelPackageGroupArn"], + "members":{ + "ModelPackageGroupArn":{ + "shape":"ModelPackageGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the model package group.

" + } + } + }, "RealtimeInferenceInstanceTypes":{ "type":"list", "member":{"shape":"ProductionVariantInstanceType"} @@ -12362,6 +17355,96 @@ "RecordIO" ] }, + "RedshiftClusterId":{ + "type":"string", + "documentation":"

The Redshift cluster identifier.

", + "max":63, + "min":1, + "pattern":".*" + }, + "RedshiftDatabase":{ + "type":"string", + "documentation":"

The name of the Redshift database used in Redshift query execution.

", + "max":64, + "min":1, + "pattern":".*" + }, + "RedshiftDatasetDefinition":{ + "type":"structure", + "required":[ + "ClusterId", + "Database", + "DbUser", + "QueryString", + "ClusterRoleArn", + "OutputS3Uri", + "OutputFormat" + ], + "members":{ + "ClusterId":{"shape":"RedshiftClusterId"}, + "Database":{"shape":"RedshiftDatabase"}, + "DbUser":{"shape":"RedshiftUserName"}, + "QueryString":{"shape":"RedshiftQueryString"}, + "ClusterRoleArn":{ + "shape":"RoleArn", + "documentation":"

The IAM role attached to your Redshift cluster that Amazon SageMaker uses to generate datasets.

" + }, + "OutputS3Uri":{ + "shape":"S3Uri", + "documentation":"

The location in Amazon S3 where the Redshift query results are stored.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data from a Redshift execution.

" + }, + "OutputFormat":{"shape":"RedshiftResultFormat"}, + "OutputCompression":{"shape":"RedshiftResultCompressionType"} + }, + "documentation":"

Configuration for Redshift Dataset Definition input.

" + }, + "RedshiftQueryString":{ + "type":"string", + "documentation":"

The SQL query statements to be executed.

", + "max":4096, + "min":1, + "pattern":"[\\s\\S]+" + }, + "RedshiftResultCompressionType":{ + "type":"string", + "documentation":"

The compression used for Redshift query results.

", + "enum":[ + "None", + "GZIP", + "BZIP2", + "ZSTD", + "SNAPPY" + ] + }, + "RedshiftResultFormat":{ + "type":"string", + "documentation":"

The data storage format for Redshift query results.

", + "enum":[ + "PARQUET", + "CSV" + ] + }, + "RedshiftUserName":{ + "type":"string", + "documentation":"

The database user name used in Redshift query execution.

", + "max":128, + "min":1, + "pattern":".*" + }, + "RegisterModelStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String256", + "documentation":"

The Amazon Resource Name (ARN) of the model package.

" + } + }, + "documentation":"

Metadata for a register model job step.

" + }, "RenderUiTemplateRequest":{ "type":"structure", "required":[ @@ -12459,7 +17542,7 @@ "ResourceArn":{ "type":"string", "max":256, - "pattern":"arn:.*" + "pattern":"arn:aws[a-z-]*:sagemaker:[a-z0-9-]*:[0-9]{12}:.+" }, "ResourceConfig":{ "type":"structure", @@ -12545,14 +17628,18 @@ "members":{ "SageMakerImageArn":{ "shape":"ImageArn", - "documentation":"

The Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" + "documentation":"

The ARN of the SageMaker image that the image version belongs to.

" + }, + "SageMakerImageVersionArn":{ + "shape":"ImageVersionArn", + "documentation":"

The ARN of the image version created on the instance.

" }, "InstanceType":{ "shape":"AppInstanceType", - "documentation":"

The instance type.

" + "documentation":"

The instance type that the image version runs on.

" } }, - "documentation":"

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The ARN is stored as metadata in SageMaker Studio notebooks.

" + "documentation":"

Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.

" }, "ResourceType":{ "type":"string", @@ -12560,7 +17647,13 @@ "TrainingJob", "Experiment", "ExperimentTrial", - "ExperimentTrialComponent" + "ExperimentTrialComponent", + "Endpoint", + "ModelPackage", + "ModelPackageGroup", + "Pipeline", + "PipelineExecution", + "FeatureGroup" ] }, "ResponseMIMEType":{ @@ -12623,7 +17716,7 @@ "type":"map", "key":{"shape":"ConfigKey"}, "value":{"shape":"ConfigValue"}, - "max":20, + "max":100, "min":0 }, "S3DataDistribution":{ @@ -12667,11 +17760,33 @@ "AugmentedManifestFile" ] }, + "S3StorageConfig":{ + "type":"structure", + "required":["S3Uri"], + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

The S3 URI, or location in Amazon S3, of OfflineStore.

S3 URIs have a format similar to the following: s3://example-bucket/prefix/.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location.

The IAM role ARN that is passed as a parameter to CreateFeatureGroup must have the following permissions for the KmsKeyId:

  • \"kms:GenerateDataKey\"

" + } + }, + "documentation":"

The Amazon Simple Storage Service (Amazon S3) location and security configuration for OfflineStore.
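A minimal sketch of this configuration, assuming the generated S3StorageConfig builder for this shape; the bucket, prefix, and key ARN are placeholders. The KMS key is optional, but the role passed to CreateFeatureGroup needs kms:GenerateDataKey on it, as noted above.

    import software.amazon.awssdk.services.sagemaker.model.S3StorageConfig;

    S3StorageConfig offlineStoreS3 = S3StorageConfig.builder()
            .s3Uri("s3://example-bucket/feature-store/")                        // required
            .kmsKeyId("arn:aws:kms:us-east-1:111122223333:key/example-key-id") // optional
            .build();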

" + }, "S3Uri":{ "type":"string", "max":1024, "pattern":"^(https|s3)://([^/]+)/?(.*)$" }, + "SagemakerServicecatalogStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "SamplingPercentage":{ "type":"integer", "max":100, @@ -12748,7 +17863,13 @@ "TrialComponent":{ "shape":"TrialComponent", "documentation":"

The properties of a trial component.

" - } + }, + "Endpoint":{"shape":"Endpoint"}, + "ModelPackage":{"shape":"ModelPackage"}, + "ModelPackageGroup":{"shape":"ModelPackageGroup"}, + "Pipeline":{"shape":"Pipeline"}, + "PipelineExecution":{"shape":"PipelineExecution"}, + "FeatureGroup":{"shape":"FeatureGroup"} }, "documentation":"

A single resource returned as part of the Search API response.

" }, @@ -12823,7 +17944,8 @@ "Completed", "Failed", "Interrupted", - "MaxWaitTimeExceeded" + "MaxWaitTimeExceeded", + "Updating" ] }, "SecondaryStatusTransition":{ @@ -12873,6 +17995,52 @@ "max":5 }, "Seed":{"type":"long"}, + "ServiceCatalogEntityId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z0-9_\\-]*" + }, + "ServiceCatalogProvisionedProductDetails":{ + "type":"structure", + "members":{ + "ProvisionedProductId":{ + "shape":"ServiceCatalogEntityId", + "documentation":"

The ID of the provisioned product.

" + }, + "ProvisionedProductStatusMessage":{ + "shape":"ProvisionedProductStatusMessage", + "documentation":"

The current status of the product.

  • AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

  • UNDER_CHANGE - Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

  • TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

  • ERROR - An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

  • PLAN_IN_PROGRESS - Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.

" + } + }, + "documentation":"

Details of a provisioned service catalog product. For information about service catalog, see What is AWS Service Catalog.

" + }, + "ServiceCatalogProvisioningDetails":{ + "type":"structure", + "required":[ + "ProductId", + "ProvisioningArtifactId" + ], + "members":{ + "ProductId":{ + "shape":"ServiceCatalogEntityId", + "documentation":"

The ID of the product to provision.

" + }, + "ProvisioningArtifactId":{ + "shape":"ServiceCatalogEntityId", + "documentation":"

The ID of the provisioning artifact.

" + }, + "PathId":{ + "shape":"ServiceCatalogEntityId", + "documentation":"

The path identifier of the product. This value is optional if the product has a default path, and required if the product has more than one path.

" + }, + "ProvisioningParameters":{ + "shape":"ProvisioningParameters", + "documentation":"

A list of key value pairs that you specify when you provision a product.

" + } + }, + "documentation":"

Details that you specify to provision a service catalog product. For information about service catalog, see What is AWS Service Catalog.
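A sketch of these provisioning details, assuming the generated builders for ServiceCatalogProvisioningDetails and ProvisioningParameter from this model; the product ID, provisioning artifact ID, and parameter key/value are placeholders.

    import software.amazon.awssdk.services.sagemaker.model.ProvisioningParameter;
    import software.amazon.awssdk.services.sagemaker.model.ServiceCatalogProvisioningDetails;

    ServiceCatalogProvisioningDetails provisioning = ServiceCatalogProvisioningDetails.builder()
            .productId("prod-examplexample")             // required; placeholder ID
            .provisioningArtifactId("pa-examplexample")  // required; placeholder ID
            .provisioningParameters(ProvisioningParameter.builder()
                    .key("ExampleParameter")             // placeholder key
                    .value("example-value")              // placeholder value
                    .build())
            .build();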

" + }, "SessionExpirationDurationInSeconds":{ "type":"integer", "max":43200, @@ -12911,10 +18079,31 @@ "type":"string", "pattern":"UserName" }, - "SnsTopicArn":{ + "SnsTopicArn":{ + "type":"string", + "max":2048, + "pattern":"arn:aws[a-z\\-]*:sns:[a-z0-9\\-]*:[0-9]{12}:[a-zA-Z0-9_.-]+" + }, + "SortActionsBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime" + ] + }, + "SortArtifactsBy":{ + "type":"string", + "enum":["CreationTime"] + }, + "SortAssociationsBy":{ "type":"string", - "max":2048, - "pattern":"arn:aws[a-z\\-]*:sns:[a-z0-9\\-]*:[0-9]{12}:[a-zA-Z0-9_.-]*" + "enum":[ + "SourceArn", + "DestinationArn", + "SourceType", + "DestinationType", + "CreationTime" + ] }, "SortBy":{ "type":"string", @@ -12924,6 +18113,13 @@ "Status" ] }, + "SortContextsBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime" + ] + }, "SortExperimentsBy":{ "type":"string", "enum":[ @@ -12938,6 +18134,20 @@ "Descending" ] }, + "SortPipelineExecutionsBy":{ + "type":"string", + "enum":[ + "CreationTime", + "PipelineExecutionArn" + ] + }, + "SortPipelinesBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime" + ] + }, "SortTrialComponentsBy":{ "type":"string", "enum":[ @@ -12999,6 +18209,11 @@ "type":"string", "max":128 }, + "SourceUri":{ + "type":"string", + "max":2048, + "pattern":".*" + }, "SplitType":{ "type":"string", "enum":[ @@ -13028,12 +18243,67 @@ } } }, + "StartPipelineExecutionRequest":{ + "type":"structure", + "required":[ + "PipelineName", + "ClientRequestToken" + ], + "members":{ + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline.

" + }, + "PipelineExecutionDisplayName":{ + "shape":"PipelineExecutionName", + "documentation":"

The display name of the pipeline execution.

" + }, + "PipelineParameters":{ + "shape":"ParameterList", + "documentation":"

Contains a list of pipeline parameters. This list can be empty.

" + }, + "PipelineExecutionDescription":{ + "shape":"PipelineExecutionDescription", + "documentation":"

The description of the pipeline execution.

" + }, + "ClientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time.
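A minimal sketch of calling this operation with the AWS SDK for Java v2, assuming the generated SageMakerClient and request classes; the pipeline name is a placeholder, and a random UUID is used as the idempotency token.

    import java.util.UUID;
    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.StartPipelineExecutionRequest;
    import software.amazon.awssdk.services.sagemaker.model.StartPipelineExecutionResponse;

    try (SageMakerClient sageMaker = SageMakerClient.create()) {
        StartPipelineExecutionResponse response = sageMaker.startPipelineExecution(
                StartPipelineExecutionRequest.builder()
                        .pipelineName("example-pipeline")                  // placeholder name
                        .clientRequestToken(UUID.randomUUID().toString())  // idempotency token
                        .build());
        System.out.println(response.pipelineExecutionArn());
    }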

", + "idempotencyToken":true + } + } + }, + "StartPipelineExecutionResponse":{ + "type":"structure", + "members":{ + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + } + } + }, "StatusDetails":{ "type":"string", "max":1024, "pattern":".*" }, "StatusMessage":{"type":"string"}, + "StepName":{ + "type":"string", + "max":256, + "pattern":".*" + }, + "StepStatus":{ + "type":"string", + "enum":[ + "Starting", + "Executing", + "Stopping", + "Stopped", + "Failed", + "Succeeded" + ] + }, "StopAutoMLJobRequest":{ "type":"structure", "required":["AutoMLJobName"], @@ -13094,6 +18364,33 @@ } } }, + "StopPipelineExecutionRequest":{ + "type":"structure", + "required":[ + "PipelineExecutionArn", + "ClientRequestToken" + ], + "members":{ + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + }, + "ClientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time.

", + "idempotencyToken":true + } + } + }, + "StopPipelineExecutionResponse":{ + "type":"structure", + "members":{ + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + } + } + }, "StopProcessingJobRequest":{ "type":"structure", "required":["ProcessingJobName"], @@ -13149,10 +18446,18 @@ "min":1, "pattern":".+" }, + "String2048":{ + "type":"string", + "max":2048 + }, "String256":{ "type":"string", "max":256 }, + "String64":{ + "type":"string", + "max":64 + }, "StringParameterValue":{ "type":"string", "max":256, @@ -13211,6 +18516,12 @@ }, "documentation":"

Specified in the GetSearchSuggestions request. Limits the property names that are included in the response.

" }, + "TableName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "Tag":{ "type":"structure", "required":[ @@ -13286,7 +18597,8 @@ "amba_cv22", "x86_win32", "x86_win64", - "coreml" + "coreml", + "jacinto_tda4vm" ] }, "TargetObjectiveMetricValue":{"type":"float"}, @@ -13427,7 +18739,41 @@ "max":9, "min":0 }, + "TerminationWaitInSeconds":{ + "type":"integer", + "max":3600, + "min":0 + }, "Timestamp":{"type":"timestamp"}, + "TrafficRoutingConfig":{ + "type":"structure", + "required":[ + "Type", + "WaitIntervalInSeconds" + ], + "members":{ + "Type":{ + "shape":"TrafficRoutingConfigType", + "documentation":"

" + }, + "WaitIntervalInSeconds":{ + "shape":"WaitIntervalInSeconds", + "documentation":"

" + }, + "CanarySize":{ + "shape":"CapacitySize", + "documentation":"

" + } + }, + "documentation":"

Currently, the TrafficRoutingConfig API is not supported.

" + }, + "TrafficRoutingConfigType":{ + "type":"string", + "enum":[ + "ALL_AT_ONCE", + "CANARY" + ] + }, "TrainingInputMode":{ "type":"string", "enum":[ @@ -13470,6 +18816,7 @@ "ml.p3.8xlarge", "ml.p3.16xlarge", "ml.p3dn.24xlarge", + "ml.p4d.24xlarge", "ml.c5.xlarge", "ml.c5.2xlarge", "ml.c5.4xlarge", @@ -13615,7 +18962,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + "documentation":"

An array of key-value pairs. You can use tags to categorize your AWS resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging AWS Resources.

" } }, "documentation":"

Contains information about a training job.

" @@ -13673,7 +19020,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "TrainingJobSortByOptions":{ "type":"string", @@ -13724,6 +19071,16 @@ }, "documentation":"

The numbers of training jobs launched by a hyperparameter tuning job, categorized by status.

" }, + "TrainingJobStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"TrainingJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the training job that was run by this step execution.

" + } + }, + "documentation":"

Metadata for a training job step.

" + }, "TrainingJobSummaries":{ "type":"list", "member":{"shape":"TrainingJobSummary"} @@ -13825,7 +19182,7 @@ "TransformEnvironmentKey":{ "type":"string", "max":1024, - "pattern":"[a-zA-Z_][a-zA-Z0-9_]*" + "pattern":"[a-zA-Z_][a-zA-Z0-9_]{0,1023}" }, "TransformEnvironmentMap":{ "type":"map", @@ -14021,7 +19378,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "TransformJobStatus":{ "type":"string", @@ -14033,6 +19390,16 @@ "Stopped" ] }, + "TransformJobStepMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"TransformJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the transform job that was run by this step execution.

" + } + }, + "documentation":"

Metadata for a transform job step.

" + }, "TransformJobSummaries":{ "type":"list", "member":{"shape":"TransformJobSummary"} @@ -14170,6 +19537,7 @@ "documentation":"

Who last modified the trial.

" }, "LastModifiedBy":{"shape":"UserContext"}, + "MetadataProperties":{"shape":"MetadataProperties"}, "Tags":{ "shape":"TagList", "documentation":"

The list of tags that are associated with the trial. You can use Search API to search on the tags.

" @@ -14240,6 +19608,7 @@ "shape":"TrialComponentMetricSummaries", "documentation":"

The metrics for the component.

" }, + "MetadataProperties":{"shape":"MetadataProperties"}, "SourceDetail":{ "shape":"TrialComponentSourceDetail", "documentation":"

Details of the source of the component.

" @@ -14627,6 +19996,95 @@ }, "documentation":"

Container for user interface template information.

" }, + "UpdateActionRequest":{ + "type":"structure", + "required":["ActionName"], + "members":{ + "ActionName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the action to update.

" + }, + "Description":{ + "shape":"ExperimentDescription", + "documentation":"

The new description for the action.

" + }, + "Status":{ + "shape":"ActionStatus", + "documentation":"

The new status for the action.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

The new list of properties. Overwrites the current property list.

" + }, + "PropertiesToRemove":{ + "shape":"ListLineageEntityParameterKey", + "documentation":"

A list of properties to remove.

" + } + } + }, + "UpdateActionResponse":{ + "type":"structure", + "members":{ + "ActionArn":{ + "shape":"ActionArn", + "documentation":"

The Amazon Resource Name (ARN) of the action.

" + } + } + }, + "UpdateAppImageConfigRequest":{ + "type":"structure", + "required":["AppImageConfigName"], + "members":{ + "AppImageConfigName":{ + "shape":"AppImageConfigName", + "documentation":"

The name of the AppImageConfig to update.

" + }, + "KernelGatewayImageConfig":{ + "shape":"KernelGatewayImageConfig", + "documentation":"

The new KernelGateway app to run on the image.

" + } + } + }, + "UpdateAppImageConfigResponse":{ + "type":"structure", + "members":{ + "AppImageConfigArn":{ + "shape":"AppImageConfigArn", + "documentation":"

The Amazon Resource Name (ARN) for the AppImageConfig.

" + } + } + }, + "UpdateArtifactRequest":{ + "type":"structure", + "required":["ArtifactArn"], + "members":{ + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact to update.

" + }, + "ArtifactName":{ + "shape":"ExperimentEntityName", + "documentation":"

The new name for the artifact.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

The new list of properties. Overwrites the current property list.

" + }, + "PropertiesToRemove":{ + "shape":"ListLineageEntityParameterKey", + "documentation":"

A list of properties to remove.
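A sketch of the Properties/PropertiesToRemove semantics on UpdateArtifact, assuming the generated SageMakerClient and UpdateArtifactRequest classes; the artifact ARN and property names are placeholders.

    import java.util.Map;
    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.UpdateArtifactRequest;

    try (SageMakerClient sageMaker = SageMakerClient.create()) {
        sageMaker.updateArtifact(UpdateArtifactRequest.builder()
                .artifactArn("arn:aws:sagemaker:us-east-1:111122223333:artifact/example") // placeholder ARN
                .properties(Map.of("stage", "validated"))  // overwrites the current property list
                .propertiesToRemove("obsolete-key")        // keys listed here are removed
                .build());
    }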

" + } + } + }, + "UpdateArtifactResponse":{ + "type":"structure", + "members":{ + "ArtifactArn":{ + "shape":"ArtifactArn", + "documentation":"

The Amazon Resource Name (ARN) of the artifact.

" + } + } + }, "UpdateCodeRepositoryInput":{ "type":"structure", "required":["CodeRepositoryName"], @@ -14651,6 +20109,37 @@ } } }, + "UpdateContextRequest":{ + "type":"structure", + "required":["ContextName"], + "members":{ + "ContextName":{ + "shape":"ExperimentEntityName", + "documentation":"

The name of the context to update.

" + }, + "Description":{ + "shape":"ExperimentDescription", + "documentation":"

The new description for the context.

" + }, + "Properties":{ + "shape":"LineageEntityParameters", + "documentation":"

The new list of properties. Overwrites the current property list.

" + }, + "PropertiesToRemove":{ + "shape":"ListLineageEntityParameterKey", + "documentation":"

A list of properties to remove.

" + } + } + }, + "UpdateContextResponse":{ + "type":"structure", + "members":{ + "ContextArn":{ + "shape":"ContextArn", + "documentation":"

The Amazon Resource Name (ARN) of the context.

" + } + } + }, "UpdateDomainRequest":{ "type":"structure", "required":["DomainId"], @@ -14691,11 +20180,15 @@ }, "RetainAllVariantProperties":{ "shape":"Boolean", - "documentation":"

When updating endpoint resources, enables or disables the retention of variant properties, such as the instance count or the variant weight. To retain the variant properties of an endpoint when updating it, set RetainAllVariantProperties to true. To use the variant properties specified in a new EndpointConfig call when updating an endpoint, set RetainAllVariantProperties to false.

" + "documentation":"

When updating endpoint resources, enables or disables the retention of variant properties, such as the instance count or the variant weight. To retain the variant properties of an endpoint when updating it, set RetainAllVariantProperties to true. To use the variant properties specified in a new EndpointConfig call when updating an endpoint, set RetainAllVariantProperties to false. The default is false.

" }, "ExcludeRetainedVariantProperties":{ "shape":"VariantPropertyList", "documentation":"

When you are updating endpoint resources with UpdateEndpointInput$RetainAllVariantProperties, whose value is set to true, ExcludeRetainedVariantProperties specifies the list of type VariantProperty to override with the values provided by EndpointConfig. If you don't specify a value for ExcludeAllVariantProperties, no variant properties are overridden.
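A sketch showing the RetainAllVariantProperties flag on UpdateEndpoint, assuming the generated client and request classes (EndpointName and EndpointConfigName are required members defined elsewhere in this model); the names are placeholders.

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.UpdateEndpointRequest;

    try (SageMakerClient sageMaker = SageMakerClient.create()) {
        sageMaker.updateEndpoint(UpdateEndpointRequest.builder()
                .endpointName("example-endpoint")                 // placeholder
                .endpointConfigName("example-endpoint-config-v2") // placeholder
                .retainAllVariantProperties(true)  // keep the current instance counts and weights
                .build());
    }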

" + }, + "DeploymentConfig":{ + "shape":"DeploymentConfig", + "documentation":"

The deployment configuration for the endpoint to be updated.

" } } }, @@ -14763,6 +20256,72 @@ } } }, + "UpdateImageRequest":{ + "type":"structure", + "required":["ImageName"], + "members":{ + "DeleteProperties":{ + "shape":"ImageDeletePropertyList", + "documentation":"

A list of properties to delete. Only the Description and DisplayName properties can be deleted.

" + }, + "Description":{ + "shape":"ImageDescription", + "documentation":"

The new description for the image.

" + }, + "DisplayName":{ + "shape":"ImageDisplayName", + "documentation":"

The new display name for the image.

" + }, + "ImageName":{ + "shape":"ImageName", + "documentation":"

The name of the image to update.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The new Amazon Resource Name (ARN) for the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

" + } + } + }, + "UpdateImageResponse":{ + "type":"structure", + "members":{ + "ImageArn":{ + "shape":"ImageArn", + "documentation":"

The Amazon Resource Name (ARN) of the image.

" + } + } + }, + "UpdateModelPackageInput":{ + "type":"structure", + "required":[ + "ModelPackageArn", + "ModelApprovalStatus" + ], + "members":{ + "ModelPackageArn":{ + "shape":"ModelPackageArn", + "documentation":"

The Amazon Resource Name (ARN) of the model.

" + }, + "ModelApprovalStatus":{ + "shape":"ModelApprovalStatus", + "documentation":"

The approval status of the model.

" + }, + "ApprovalDescription":{ + "shape":"ApprovalDescription", + "documentation":"

A description for the approval status of the model.
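A sketch of updating a model package's approval status, assuming the generated SageMakerClient, UpdateModelPackageRequest, and ModelApprovalStatus classes; the ARN and description are placeholders.

    import software.amazon.awssdk.services.sagemaker.SageMakerClient;
    import software.amazon.awssdk.services.sagemaker.model.ModelApprovalStatus;
    import software.amazon.awssdk.services.sagemaker.model.UpdateModelPackageRequest;

    try (SageMakerClient sageMaker = SageMakerClient.create()) {
        sageMaker.updateModelPackage(UpdateModelPackageRequest.builder()
                .modelPackageArn("arn:aws:sagemaker:us-east-1:111122223333:model-package/example-group/1") // placeholder
                .modelApprovalStatus(ModelApprovalStatus.APPROVED)
                .approvalDescription("Passed offline evaluation")  // placeholder description
                .build());
    }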

" + } + } + }, + "UpdateModelPackageOutput":{ + "type":"structure", + "required":["ModelPackageArn"], + "members":{ + "ModelPackageArn":{ + "shape":"ModelPackageArn", + "documentation":"

The Amazon Resource Name (ARN) of the model.

" + } + } + }, "UpdateMonitoringScheduleRequest":{ "type":"structure", "required":[ @@ -14876,6 +20435,68 @@ "members":{ } }, + "UpdatePipelineExecutionRequest":{ + "type":"structure", + "required":["PipelineExecutionArn"], + "members":{ + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the pipeline execution.

" + }, + "PipelineExecutionDescription":{ + "shape":"PipelineExecutionDescription", + "documentation":"

The description of the pipeline execution.

" + }, + "PipelineExecutionDisplayName":{ + "shape":"PipelineExecutionName", + "documentation":"

The display name of the pipeline execution.

" + } + } + }, + "UpdatePipelineExecutionResponse":{ + "type":"structure", + "members":{ + "PipelineExecutionArn":{ + "shape":"PipelineExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated pipeline execution.

" + } + } + }, + "UpdatePipelineRequest":{ + "type":"structure", + "required":["PipelineName"], + "members":{ + "PipelineName":{ + "shape":"PipelineName", + "documentation":"

The name of the pipeline to update.

" + }, + "PipelineDisplayName":{ + "shape":"PipelineName", + "documentation":"

The display name of the pipeline.

" + }, + "PipelineDefinition":{ + "shape":"PipelineDefinition", + "documentation":"

The JSON pipeline definition.

" + }, + "PipelineDescription":{ + "shape":"PipelineDescription", + "documentation":"

The description of the pipeline.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the pipeline uses to execute.

" + } + } + }, + "UpdatePipelineResponse":{ + "type":"structure", + "members":{ + "PipelineArn":{ + "shape":"PipelineArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated pipeline.

" + } + } + }, "UpdateTrialComponentRequest":{ "type":"structure", "required":["TrialComponentName"], @@ -15109,7 +20730,7 @@ "UserProfileName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "UserProfileSortKey":{ "type":"string", @@ -15124,7 +20745,10 @@ "Deleting", "Failed", "InService", - "Pending" + "Pending", + "Updating", + "Update_Failed", + "Delete_Failed" ] }, "UserSettings":{ @@ -15136,7 +20760,7 @@ }, "SecurityGroups":{ "shape":"SecurityGroupIds", - "documentation":"

The security groups.

" + "documentation":"

The security groups for the Amazon Virtual Private Cloud (VPC) that Studio uses for communication.

Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly.

Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly.

" }, "SharingSettings":{ "shape":"SharingSettings", @@ -15160,7 +20784,7 @@ "VariantName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "VariantProperty":{ "type":"structure", @@ -15191,6 +20815,12 @@ "type":"float", "min":0 }, + "VersionedArnOrName":{ + "type":"string", + "max":176, + "min":1, + "pattern":"(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z\\-]*\\/)?([a-zA-Z0-9]([a-zA-Z0-9-]){0,62})(? software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml new file mode 100644 index 000000000000..36df209a2376 --- /dev/null +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + sagemakerfeaturestoreruntime + AWS Java SDK :: Services :: Sage Maker Feature Store Runtime + The AWS Java SDK for Sage Maker Feature Store Runtime module holds the client classes that are used for + communicating with Sage Maker Feature Store Runtime. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.sagemakerfeaturestoreruntime + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/paginators-1.json b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/service-2.json b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f7a71ea7f971 --- /dev/null +++ b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,249 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-01", + "endpointPrefix":"featurestore-runtime.sagemaker", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon SageMaker Feature Store Runtime", + "serviceId":"SageMaker FeatureStore Runtime", + "signatureVersion":"v4", + "signingName":"sagemaker", + "uid":"sagemaker-featurestore-runtime-2020-07-01" + }, + "operations":{ + "DeleteRecord":{ + "name":"DeleteRecord", + "http":{ + "method":"DELETE", + "requestUri":"/FeatureGroup/{FeatureGroupName}" + }, + "input":{"shape":"DeleteRecordRequest"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"InternalFailure"}, + {"shape":"ServiceUnavailable"}, + {"shape":"AccessForbidden"} + ], + "documentation":"

Deletes a Record from a FeatureGroup. A new record will show up in the OfflineStore when the DeleteRecord API is called. This record will have a value of True in the is_deleted column.

" + }, + "GetRecord":{ + "name":"GetRecord", + "http":{ + "method":"GET", + "requestUri":"/FeatureGroup/{FeatureGroupName}" + }, + "input":{"shape":"GetRecordRequest"}, + "output":{"shape":"GetRecordResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"ResourceNotFound"}, + {"shape":"InternalFailure"}, + {"shape":"ServiceUnavailable"}, + {"shape":"AccessForbidden"} + ], + "documentation":"

Use for OnlineStore serving from a FeatureStore. Only the latest records stored in the OnlineStore can be retrieved. If no Record with RecordIdentifierValue is found, then an empty result is returned.
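A minimal sketch of OnlineStore serving with the SageMakerFeatureStoreRuntimeClient generated from this new module; the feature group name and record identifier are placeholders.

    import software.amazon.awssdk.services.sagemakerfeaturestoreruntime.SageMakerFeatureStoreRuntimeClient;
    import software.amazon.awssdk.services.sagemakerfeaturestoreruntime.model.GetRecordRequest;
    import software.amazon.awssdk.services.sagemakerfeaturestoreruntime.model.GetRecordResponse;

    try (SageMakerFeatureStoreRuntimeClient featureStore = SageMakerFeatureStoreRuntimeClient.create()) {
        GetRecordResponse response = featureStore.getRecord(GetRecordRequest.builder()
                .featureGroupName("customers")                 // placeholder feature group
                .recordIdentifierValueAsString("customer-42")  // placeholder record identifier
                .build());
        // An empty record() list means no record was found for this identifier.
        response.record().forEach(fv ->
                System.out.println(fv.featureName() + " = " + fv.valueAsString()));
    }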

" + }, + "PutRecord":{ + "name":"PutRecord", + "http":{ + "method":"PUT", + "requestUri":"/FeatureGroup/{FeatureGroupName}" + }, + "input":{"shape":"PutRecordRequest"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"InternalFailure"}, + {"shape":"ServiceUnavailable"}, + {"shape":"AccessForbidden"} + ], + "documentation":"

Used for data ingestion into the FeatureStore. The PutRecord API writes to both the OnlineStore and OfflineStore. If the record is the latest record for the recordIdentifier, the record is written to both the OnlineStore and OfflineStore. If the record is a historic record, it is written only to the OfflineStore.

" + } + }, + "shapes":{ + "AccessForbidden":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

You do not have permission to perform an action.

", + "error":{"httpStatusCode":403}, + "exception":true, + "synthetic":true + }, + "DeleteRecordRequest":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "RecordIdentifierValueAsString", + "EventTime" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the feature group to delete the record from.

", + "location":"uri", + "locationName":"FeatureGroupName" + }, + "RecordIdentifierValueAsString":{ + "shape":"ValueAsString", + "documentation":"

The value for the RecordIdentifier that uniquely identifies the record, in string format.

", + "location":"querystring", + "locationName":"RecordIdentifierValueAsString" + }, + "EventTime":{ + "shape":"ValueAsString", + "documentation":"

Timestamp indicating when the deletion event occurred. EventTime can be used to query data at a certain point in time.

", + "location":"querystring", + "locationName":"EventTime" + } + } + }, + "FeatureGroupName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, + "FeatureName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])*" + }, + "FeatureNames":{ + "type":"list", + "member":{"shape":"FeatureName"}, + "min":1 + }, + "FeatureValue":{ + "type":"structure", + "required":[ + "FeatureName", + "ValueAsString" + ], + "members":{ + "FeatureName":{ + "shape":"FeatureName", + "documentation":"

The name of a feature that a feature value corresponds to.

" + }, + "ValueAsString":{ + "shape":"ValueAsString", + "documentation":"

The value associated with a feature, in string format. Note that feature types can be String, Integral, or Fractional. This value represents all three types as a string.

" + } + }, + "documentation":"

The value associated with a feature.

" + }, + "GetRecordRequest":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "RecordIdentifierValueAsString" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the feature group from which you want to retrieve the record.

", + "location":"uri", + "locationName":"FeatureGroupName" + }, + "RecordIdentifierValueAsString":{ + "shape":"ValueAsString", + "documentation":"

The value that corresponds to RecordIdentifier type and uniquely identifies the record in the FeatureGroup.

", + "location":"querystring", + "locationName":"RecordIdentifierValueAsString" + }, + "FeatureNames":{ + "shape":"FeatureNames", + "documentation":"

List of names of Features to be retrieved. If not specified, the latest values for all the Features are returned.

", + "location":"querystring", + "locationName":"FeatureName" + } + } + }, + "GetRecordResponse":{ + "type":"structure", + "members":{ + "Record":{ + "shape":"Record", + "documentation":"

The record you requested. A list of FeatureValues.

" + } + } + }, + "InternalFailure":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

An internal failure occurred. Try your request again. If the problem persists, contact AWS customer support.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "synthetic":true + }, + "Message":{ + "type":"string", + "max":2048 + }, + "PutRecordRequest":{ + "type":"structure", + "required":[ + "FeatureGroupName", + "Record" + ], + "members":{ + "FeatureGroupName":{ + "shape":"FeatureGroupName", + "documentation":"

The name of the feature group that you want to insert the record into.

", + "location":"uri", + "locationName":"FeatureGroupName" + }, + "Record":{ + "shape":"Record", + "documentation":"

List of FeatureValues to be inserted. This will be a full overwrite. If you want to update only a few of the feature values, do the following:

  • Use GetRecord to retrieve the latest record.

  • Update the record returned from GetRecord.

  • Use PutRecord to update feature values.
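The sketch below walks through that GetRecord-then-PutRecord flow, assuming the generated client and model classes; the feature group, record identifier, and the "churn_score" and "EventTime" feature names are placeholders.

    import java.util.ArrayList;
    import java.util.List;
    import software.amazon.awssdk.services.sagemakerfeaturestoreruntime.SageMakerFeatureStoreRuntimeClient;
    import software.amazon.awssdk.services.sagemakerfeaturestoreruntime.model.FeatureValue;
    import software.amazon.awssdk.services.sagemakerfeaturestoreruntime.model.GetRecordRequest;
    import software.amazon.awssdk.services.sagemakerfeaturestoreruntime.model.PutRecordRequest;

    try (SageMakerFeatureStoreRuntimeClient featureStore = SageMakerFeatureStoreRuntimeClient.create()) {
        // 1. Retrieve the latest record.
        List<FeatureValue> latest = featureStore.getRecord(GetRecordRequest.builder()
                .featureGroupName("customers")
                .recordIdentifierValueAsString("customer-42")
                .build()).record();

        // 2. Update only the feature values you care about; keep the rest unchanged.
        List<FeatureValue> updated = new ArrayList<>();
        for (FeatureValue fv : latest) {
            if (fv.featureName().equals("churn_score")) {
                updated.add(fv.toBuilder().valueAsString("0.87").build());
            } else if (fv.featureName().equals("EventTime")) {
                updated.add(fv.toBuilder()
                        .valueAsString(String.valueOf(System.currentTimeMillis() / 1000.0))
                        .build());
            } else {
                updated.add(fv);
            }
        }

        // 3. Write the full record back; PutRecord overwrites the whole record.
        featureStore.putRecord(PutRecordRequest.builder()
                .featureGroupName("customers")
                .record(updated)
                .build());
    }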

" + } + } + }, + "Record":{ + "type":"list", + "member":{"shape":"FeatureValue"}, + "min":1 + }, + "ResourceNotFound":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

A resource that is required to perform an action was not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceUnavailable":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

The service is currently unavailable.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true, + "synthetic":true + }, + "ValidationError":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

There was an error validating your request.

", + "error":{"httpStatusCode":400}, + "exception":true, + "synthetic":true + }, + "ValueAsString":{ + "type":"string", + "max":358400, + "pattern":".*" + } + }, + "documentation":"

Contains all data plane API operations and data types for the Amazon SageMaker Feature Store. Use this API to put, delete, and retrieve (get) features from a feature store.

Use the following operations to configure your OnlineStore and OfflineStore features, and to create and manage feature groups:

" +} diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 23bb12b5920b..3151b0c0cb99 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index bc6e8e5ea662..f79a18d48ca3 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 921f6ffe7b8e..eb42e521b66e 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index f78957ed02ee..b14ccdebf26c 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 984146e922f9..9e7e630f43d0 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/paginators-1.json b/services/securityhub/src/main/resources/codegen-resources/paginators-1.json index c7924802d10c..7726e5357a8e 100644 --- a/services/securityhub/src/main/resources/codegen-resources/paginators-1.json +++ b/services/securityhub/src/main/resources/codegen-resources/paginators-1.json @@ -49,6 +49,11 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListOrganizationAdminAccounts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index 55f7e285541b..5b298cbf2829 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from.

When the member account accepts the invitation, permission is granted to the master account to view findings generated in the member account.

" + "documentation":"

Accepts the invitation to be a member account and be monitored by the Security Hub master account that the invitation was sent from.

This operation is only used by member accounts that are not added through Organizations.

When the member account accepts the invitation, permission is granted to the master account to view findings generated in the member account.

" }, "BatchDisableStandards":{ "name":"BatchDisableStandards", @@ -142,7 +142,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

After you use CreateMembers to create member account associations in Security Hub, you must use the InviteMembers operation to invite the accounts to enable Security Hub and become member accounts in Security Hub.

If the account owner accepts the invitation, the account becomes a member account in Security Hub. A permissions policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start to be sent to both the member and master accounts.

To remove the association between the master and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

" + "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. If you are integrated with Organizations, then the master account is the Security Hub administrator account that is designated by the organization management account.

CreateMembers is always used to add accounts that are not organization members.

For accounts that are part of an organization, CreateMembers is only used in the following cases:

  • Security Hub is not configured to automatically add new accounts in an organization.

  • The account was disassociated or deleted in Security Hub.

This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub.

Accounts that are part of an organization do not receive an invitation. They automatically become a member account in Security Hub.

A permissions policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in a member account, findings are sent to both the member and master accounts.

To remove the association between the master and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.
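A sketch of the invite-based flow for accounts that are outside an organization, assuming the generated SecurityHubClient, AccountDetails, and request classes; the account ID and email are placeholders. Organization accounts skip the InviteMembers step.

    import software.amazon.awssdk.services.securityhub.SecurityHubClient;
    import software.amazon.awssdk.services.securityhub.model.AccountDetails;
    import software.amazon.awssdk.services.securityhub.model.CreateMembersRequest;
    import software.amazon.awssdk.services.securityhub.model.InviteMembersRequest;

    try (SecurityHubClient securityHub = SecurityHubClient.create()) {
        // Create the member association from the master (administrator) account.
        securityHub.createMembers(CreateMembersRequest.builder()
                .accountDetails(AccountDetails.builder()
                        .accountId("111122223333")    // placeholder account ID
                        .email("member@example.com")  // placeholder email
                        .build())
                .build());

        // For accounts that are not organization members, follow up with an invitation.
        securityHub.inviteMembers(InviteMembersRequest.builder()
                .accountIds("111122223333")
                .build());
    }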

" }, "DeclineInvitations":{ "name":"DeclineInvitations", @@ -158,7 +158,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Declines invitations to become a member account.

" + "documentation":"

Declines invitations to become a member account.

This operation is only used by accounts that are not part of an organization. Organization accounts do not receive invitations.

" }, "DeleteActionTarget":{ "name":"DeleteActionTarget", @@ -208,7 +208,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Deletes invitations received by the AWS account to become a member account.

" + "documentation":"

Deletes invitations received by the AWS account to become a member account.

This operation is only used by accounts that are not part of an organization. Organization accounts do not receive invitations.

" }, "DeleteMembers":{ "name":"DeleteMembers", @@ -225,7 +225,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the specified member accounts from Security Hub.

" + "documentation":"

Deletes the specified member accounts from Security Hub.

Can be used to delete member accounts that belong to an organization as well as member accounts that were invited manually.

" }, "DescribeActionTargets":{ "name":"DescribeActionTargets", @@ -260,6 +260,22 @@ ], "documentation":"

Returns details about the Hub resource in your account, including the HubArn and the time when you enabled Security Hub.

" }, + "DescribeOrganizationConfiguration":{ + "name":"DescribeOrganizationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/organization/configuration" + }, + "input":{"shape":"DescribeOrganizationConfigurationRequest"}, + "output":{"shape":"DescribeOrganizationConfigurationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"InvalidAccessException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Returns information about the Organizations configuration for Security Hub. Can only be called from a Security Hub administrator account.
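A short sketch that reads the organization settings returned by this operation with the generated Java client, assuming default credential and region resolution:

    import software.amazon.awssdk.services.securityhub.SecurityHubClient;
    import software.amazon.awssdk.services.securityhub.model.DescribeOrganizationConfigurationRequest;
    import software.amazon.awssdk.services.securityhub.model.DescribeOrganizationConfigurationResponse;

    public class DescribeOrgConfigSketch {
        public static void main(String[] args) {
            try (SecurityHubClient securityHub = SecurityHubClient.create()) {
                // Must be called from the Security Hub administrator account.
                DescribeOrganizationConfigurationResponse config = securityHub.describeOrganizationConfiguration(
                        DescribeOrganizationConfigurationRequest.builder().build());
                System.out.println("AutoEnable: " + config.autoEnable());
                System.out.println("MemberAccountLimitReached: " + config.memberAccountLimitReached());
            }
        }
    }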

" + }, "DescribeProducts":{ "name":"DescribeProducts", "http":{ @@ -324,6 +340,22 @@ ], "documentation":"

Disables the integration of the specified product with Security Hub. After the integration is disabled, findings from that product are no longer sent to Security Hub.

" }, + "DisableOrganizationAdminAccount":{ + "name":"DisableOrganizationAdminAccount", + "http":{ + "method":"POST", + "requestUri":"/organization/admin/disable" + }, + "input":{"shape":"DisableOrganizationAdminAccountRequest"}, + "output":{"shape":"DisableOrganizationAdminAccountResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"InvalidAccessException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Disables a Security Hub administrator account. Can only be called by the organization management account.

" + }, "DisableSecurityHub":{ "name":"DisableSecurityHub", "http":{ @@ -355,7 +387,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates the current Security Hub member account from the associated master account.

" + "documentation":"

Disassociates the current Security Hub member account from the associated master account.

This operation is only used by accounts that are not part of an organization. For organization accounts, only the master account (the designated Security Hub administrator) can disassociate a member account.

" }, "DisassociateMembers":{ "name":"DisassociateMembers", @@ -372,7 +404,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Disassociates the specified member accounts from the associated master account.

" + "documentation":"

Disassociates the specified member accounts from the associated master account.

Can be used to disassociate both accounts that are in an organization and accounts that were invited manually.

" }, "EnableImportFindingsForProduct":{ "name":"EnableImportFindingsForProduct", @@ -391,6 +423,22 @@ ], "documentation":"

Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub.

When you enable a product integration, a permissions policy that grants permission for the product to send findings to Security Hub is applied.

" }, + "EnableOrganizationAdminAccount":{ + "name":"EnableOrganizationAdminAccount", + "http":{ + "method":"POST", + "requestUri":"/organization/admin/enable" + }, + "input":{"shape":"EnableOrganizationAdminAccountRequest"}, + "output":{"shape":"EnableOrganizationAdminAccountResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"InvalidAccessException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Designates the Security Hub administrator account for an organization. Can only be called by the organization management account.
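A minimal sketch of designating the administrator account with the generated Java client; the account ID is a placeholder and the call is assumed to run from the organization management account:

    import software.amazon.awssdk.services.securityhub.SecurityHubClient;
    import software.amazon.awssdk.services.securityhub.model.EnableOrganizationAdminAccountRequest;

    public class EnableAdminSketch {
        public static void main(String[] args) {
            try (SecurityHubClient securityHub = SecurityHubClient.create()) {
                // Designate a member account as the Security Hub administrator for the organization.
                securityHub.enableOrganizationAdminAccount(EnableOrganizationAdminAccountRequest.builder()
                        .adminAccountId("111122223333")
                        .build());
            }
        }
    }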

" + }, "EnableSecurityHub":{ "name":"EnableSecurityHub", "http":{ @@ -505,7 +553,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Provides the details for the Security Hub master account for the current member account.

" + "documentation":"

Provides the details for the Security Hub master account for the current member account.

Can be used by both member accounts that are in an organization and accounts that were invited manually.

" }, "GetMembers":{ "name":"GetMembers", @@ -522,7 +570,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the details for the Security Hub member accounts for the specified account IDs.

" + "documentation":"

Returns the details for the Security Hub member accounts for the specified account IDs.

A master account can be either a delegated Security Hub administrator account for an organization or a master account that enabled Security Hub manually.

The results include both member accounts that are in an organization and accounts that were invited manually.

" }, "InviteMembers":{ "name":"InviteMembers", @@ -539,7 +587,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from.

Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

When the account owner accepts the invitation to become a member account and enables Security Hub, the master account can view the findings generated from the member account.

" + "documentation":"

Invites other AWS accounts to become member accounts for the Security Hub master account that the invitation is sent from.

This operation is only used to invite accounts that do not belong to an organization. Organization accounts do not receive invitations.

Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

When the account owner enables Security Hub and accepts the invitation to become a member account, the master account can view the findings generated from the member account.

" }, "ListEnabledProductsForImport":{ "name":"ListEnabledProductsForImport", @@ -570,7 +618,7 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Lists all Security Hub membership invitations that were sent to the current AWS account.

" + "documentation":"

Lists all Security Hub membership invitations that were sent to the current AWS account.

This operation is only used by accounts that do not belong to an organization. Organization accounts do not receive invitations.

" }, "ListMembers":{ "name":"ListMembers", @@ -586,7 +634,23 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Lists details about all member accounts for the current Security Hub master account.

" + "documentation":"

Lists details about all member accounts for the current Security Hub master account.

The results include both member accounts that belong to an organization and member accounts that were invited manually.

" + }, + "ListOrganizationAdminAccounts":{ + "name":"ListOrganizationAdminAccounts", + "http":{ + "method":"GET", + "requestUri":"/organization/admin" + }, + "input":{"shape":"ListOrganizationAdminAccountsRequest"}, + "output":{"shape":"ListOrganizationAdminAccountsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"InvalidAccessException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Lists the Security Hub administrator accounts. Can only be called by the organization management account.
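A sketch of paging through the administrator list manually with MaxResults and NextToken, using the generated Java client:

    import software.amazon.awssdk.services.securityhub.SecurityHubClient;
    import software.amazon.awssdk.services.securityhub.model.AdminAccount;
    import software.amazon.awssdk.services.securityhub.model.ListOrganizationAdminAccountsRequest;
    import software.amazon.awssdk.services.securityhub.model.ListOrganizationAdminAccountsResponse;

    public class ListAdminsSketch {
        public static void main(String[] args) {
            try (SecurityHubClient securityHub = SecurityHubClient.create()) {
                String nextToken = null;
                do {
                    ListOrganizationAdminAccountsResponse page = securityHub.listOrganizationAdminAccounts(
                            ListOrganizationAdminAccountsRequest.builder()
                                    .maxResults(10)        // AdminsMaxResults allows 1-10
                                    .nextToken(nextToken)  // null on the first call
                                    .build());
                    for (AdminAccount admin : page.adminAccounts()) {
                        System.out.println(admin.accountId() + " -> " + admin.statusAsString());
                    }
                    nextToken = page.nextToken();
                } while (nextToken != null);
            }
        }
    }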

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -684,6 +748,22 @@ ], "documentation":"

Updates the Security Hub insight identified by the specified insight ARN.

" }, + "UpdateOrganizationConfiguration":{ + "name":"UpdateOrganizationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/organization/configuration" + }, + "input":{"shape":"UpdateOrganizationConfigurationRequest"}, + "output":{"shape":"UpdateOrganizationConfigurationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"InvalidAccessException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Used to update the configuration related to Organizations. Can only be called from a Security Hub administrator account.
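A minimal sketch of turning on auto-enable for new organization accounts with the generated Java client:

    import software.amazon.awssdk.services.securityhub.SecurityHubClient;
    import software.amazon.awssdk.services.securityhub.model.UpdateOrganizationConfigurationRequest;

    public class AutoEnableSketch {
        public static void main(String[] args) {
            try (SecurityHubClient securityHub = SecurityHubClient.create()) {
                // New accounts added to the organization will have Security Hub enabled automatically.
                securityHub.updateOrganizationConfiguration(UpdateOrganizationConfigurationRequest.builder()
                        .autoEnable(true)
                        .build());
            }
        }
    }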

" + }, "UpdateSecurityHubConfiguration":{ "name":"UpdateSecurityHubConfiguration", "http":{ @@ -753,6 +833,7 @@ }, "AccountDetails":{ "type":"structure", + "required":["AccountId"], "members":{ "AccountId":{ "shape":"AccountId", @@ -801,6 +882,36 @@ "type":"list", "member":{"shape":"ActionTarget"} }, + "AdminAccount":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The AWS account identifier of the Security Hub administrator account.

" + }, + "Status":{ + "shape":"AdminStatus", + "documentation":"

The current status of the Security Hub administrator account. Indicates whether the account is currently enabled as a Security Hub administrator.

" + } + }, + "documentation":"

Represents a Security Hub administrator account designated by an organization management account.

" + }, + "AdminAccounts":{ + "type":"list", + "member":{"shape":"AdminAccount"} + }, + "AdminStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLE_IN_PROGRESS" + ] + }, + "AdminsMaxResults":{ + "type":"integer", + "max":10, + "min":1 + }, "ArnList":{ "type":"list", "member":{"shape":"NonEmptyString"} @@ -6222,10 +6333,11 @@ }, "CreateMembersRequest":{ "type":"structure", + "required":["AccountDetails"], "members":{ "AccountDetails":{ "shape":"AccountDetailsList", - "documentation":"

The list of accounts to associate with the Security Hub master account. For each account, the list includes the account ID and the email address.

" + "documentation":"

The list of accounts to associate with the Security Hub master account. For each account, the list includes the account ID and optionally the email address.

" } } }, @@ -6238,6 +6350,11 @@ } } }, + "CrossAccountMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, "Cvss":{ "type":"structure", "members":{ @@ -6384,6 +6501,7 @@ }, "DeleteMembersRequest":{ "type":"structure", + "required":["AccountIds"], "members":{ "AccountIds":{ "shape":"AccountIdList", @@ -6459,6 +6577,24 @@ } } }, + "DescribeOrganizationConfigurationRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeOrganizationConfigurationResponse":{ + "type":"structure", + "members":{ + "AutoEnable":{ + "shape":"Boolean", + "documentation":"

Whether to automatically enable Security Hub for new accounts in the organization.

If set to true, then Security Hub is enabled for new accounts. If set to false, then new accounts are not added automatically.

" + }, + "MemberAccountLimitReached":{ + "shape":"Boolean", + "documentation":"

Whether the maximum number of allowed member accounts is already associated with the Security Hub administrator account.

" + } + } + }, "DescribeProductsRequest":{ "type":"structure", "members":{ @@ -6496,7 +6632,7 @@ "members":{ "StandardsSubscriptionArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of a resource that represents your subscription to a supported standard.

", + "documentation":"

The ARN of a resource that represents your subscription to a supported standard. To get the subscription ARNs of the standards you have enabled, use the GetEnabledStandards operation.

", "location":"uri", "locationName":"StandardsSubscriptionArn" }, @@ -6574,6 +6710,21 @@ "members":{ } }, + "DisableOrganizationAdminAccountRequest":{ + "type":"structure", + "required":["AdminAccountId"], + "members":{ + "AdminAccountId":{ + "shape":"NonEmptyString", + "documentation":"

The AWS account identifier of the Security Hub administrator account.

" + } + } + }, + "DisableOrganizationAdminAccountResponse":{ + "type":"structure", + "members":{ + } + }, "DisableSecurityHubRequest":{ "type":"structure", "members":{ @@ -6596,6 +6747,7 @@ }, "DisassociateMembersRequest":{ "type":"structure", + "required":["AccountIds"], "members":{ "AccountIds":{ "shape":"AccountIdList", @@ -6628,6 +6780,21 @@ } } }, + "EnableOrganizationAdminAccountRequest":{ + "type":"structure", + "required":["AdminAccountId"], + "members":{ + "AdminAccountId":{ + "shape":"NonEmptyString", + "documentation":"

The AWS account identifier of the account to designate as the Security Hub administrator account.

" + } + } + }, + "EnableOrganizationAdminAccountResponse":{ + "type":"structure", + "members":{ + } + }, "EnableSecurityHubRequest":{ "type":"structure", "members":{ @@ -6952,7 +7119,7 @@ "Message":{"shape":"NonEmptyString"}, "Code":{"shape":"NonEmptyString"} }, - "documentation":"

AWS Security Hub isn't enabled for the account used to make this request.

", + "documentation":"

There is an issue with the account used to make the request. Either Security Hub is not enabled for the account, or the account does not have permission to perform this action.

", "error":{"httpStatusCode":401}, "exception":true }, @@ -6994,6 +7161,7 @@ }, "InviteMembersRequest":{ "type":"structure", + "required":["AccountIds"], "members":{ "AccountIds":{ "shape":"AccountIdList", @@ -7066,7 +7234,7 @@ "Message":{"shape":"NonEmptyString"}, "Code":{"shape":"NonEmptyString"} }, - "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account limits. The error code describes the limit exceeded.

", + "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account or throttling limits. The error code describes the limit exceeded.

", "error":{"httpStatusCode":429}, "exception":true }, @@ -7104,7 +7272,7 @@ "type":"structure", "members":{ "MaxResults":{ - "shape":"MaxResults", + "shape":"CrossAccountMaxResults", "documentation":"

The maximum number of items to return in the response.

", "location":"querystring", "locationName":"MaxResults" @@ -7135,12 +7303,12 @@ "members":{ "OnlyAssociated":{ "shape":"Boolean", - "documentation":"

Specifies which member accounts to include in the response based on their relationship status with the master account. The default value is TRUE.

If OnlyAssociated is set to TRUE, the response includes member accounts whose relationship status with the master is set to ENABLED or DISABLED.

If OnlyAssociated is set to FALSE, the response includes all existing member accounts.

", + "documentation":"

Specifies which member accounts to include in the response based on their relationship status with the master account. The default value is TRUE.

If OnlyAssociated is set to TRUE, the response includes member accounts whose relationship status with the master is set to ENABLED.

If OnlyAssociated is set to FALSE, the response includes all existing member accounts.

", "location":"querystring", "locationName":"OnlyAssociated" }, "MaxResults":{ - "shape":"MaxResults", + "shape":"CrossAccountMaxResults", "documentation":"

The maximum number of items to return in the response.

", "location":"querystring", "locationName":"MaxResults" @@ -7166,6 +7334,36 @@ } } }, + "ListOrganizationAdminAccountsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"AdminsMaxResults", + "documentation":"

The maximum number of items to return in the response.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token that is required for pagination. On your first call to the ListOrganizationAdminAccounts operation, set the value of this parameter to NULL. For subsequent calls to the operation, to continue listing data, set the value of this parameter to the value returned from the previous response.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListOrganizationAdminAccountsResponse":{ + "type":"structure", + "members":{ + "AdminAccounts":{ + "shape":"AdminAccounts", + "documentation":"

The list of Security Hub administrator accounts.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token to use to request the next page of results.

" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -7308,7 +7506,7 @@ }, "MemberStatus":{ "shape":"NonEmptyString", - "documentation":"

The status of the relationship between the member account and its master account.

The status can have one of the following values:

  • CREATED - Indicates that the master account added the member account, but has not yet invited the member account.

  • INVITED - Indicates that the master account invited the member account. The member account has not yet responded to the invitation.

  • ASSOCIATED - Indicates that the member account accepted the invitation.

  • REMOVED - Indicates that the master account disassociated the member account.

  • RESIGNED - Indicates that the member account disassociated themselves from the master account.

  • DELETED - Indicates that the master account deleted the member account.

" + "documentation":"

The status of the relationship between the member account and its master account.

The status can have one of the following values:

  • CREATED - Indicates that the master account added the member account, but has not yet invited the member account.

  • INVITED - Indicates that the master account invited the member account. The member account has not yet responded to the invitation.

  • ENABLED - Indicates that the member account is currently active. For manually invited member accounts, indicates that the member account accepted the invitation.

  • REMOVED - Indicates that the master account disassociated the member account.

  • RESIGNED - Indicates that the member account disassociated themselves from the master account.

  • DELETED - Indicates that the master account deleted the member account.

" }, "InvitedAt":{ "shape":"Timestamp", @@ -8524,6 +8722,21 @@ "members":{ } }, + "UpdateOrganizationConfigurationRequest":{ + "type":"structure", + "required":["AutoEnable"], + "members":{ + "AutoEnable":{ + "shape":"Boolean", + "documentation":"

Whether to automatically enable Security Hub for new accounts in the organization.

By default, this is false, and new accounts are not added automatically.

To automatically enable Security Hub for new accounts, set this to true.

" + } + } + }, + "UpdateOrganizationConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateSecurityHubConfigurationRequest":{ "type":"structure", "members":{ diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index d9d993d306bc..aba794f5bdd5 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 2838578ee1c1..f8fd4d1f833a 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json index 0369bd85914d..8a76284b8e82 100644 --- a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json @@ -751,6 +751,22 @@ ], "documentation":"

This API takes either a ProvisionedProductId or a ProvisionedProductName, along with a list of one or more output keys, and responds with the key/value pairs of those outputs.

" }, + "ImportAsProvisionedProduct":{ + "name":"ImportAsProvisionedProduct", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportAsProvisionedProductInput"}, + "output":{"shape":"ImportAsProvisionedProductOutput"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidStateException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} + ], + "documentation":"

Requests the import of a resource as a Service Catalog provisioned product that is associated with a Service Catalog product and provisioning artifact. Once imported, all supported Service Catalog governance actions are available on the provisioned product.

Resource import only supports CloudFormation stack ARNs. CloudFormation StackSets and non-root nested stacks are not supported.

The CloudFormation stack must have one of the following statuses to be imported: CREATE_COMPLETE, UPDATE_COMPLETE, UPDATE_ROLLBACK_COMPLETE, IMPORT_COMPLETE, IMPORT_ROLLBACK_COMPLETE.

Import of the resource requires that the CloudFormation stack template matches the associated Service Catalog product provisioning artifact.
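A rough sketch of importing an existing stack with the generated Java client; the product, provisioning artifact, and stack identifiers are placeholders, and the required idempotency token is assumed to be filled in by the SDK because the member is marked idempotencyToken:

    import software.amazon.awssdk.services.servicecatalog.ServiceCatalogClient;
    import software.amazon.awssdk.services.servicecatalog.model.ImportAsProvisionedProductRequest;
    import software.amazon.awssdk.services.servicecatalog.model.ImportAsProvisionedProductResponse;

    public class ImportStackSketch {
        public static void main(String[] args) {
            try (ServiceCatalogClient serviceCatalog = ServiceCatalogClient.create()) {
                // PhysicalId must identify an existing CloudFormation stack in an importable status.
                ImportAsProvisionedProductResponse response = serviceCatalog.importAsProvisionedProduct(
                        ImportAsProvisionedProductRequest.builder()
                                .productId("prod-examplexyz")
                                .provisioningArtifactId("pa-examplexyz")
                                .provisionedProductName("imported-stack")
                                .physicalId("arn:aws:cloudformation:us-east-1:111122223333:stack/my-stack/example-id")
                                .build());
                System.out.println(response.recordDetail().status());
            }
        }
    }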

" + }, "ListAcceptedPortfolioShares":{ "name":"ListAcceptedPortfolioShares", "http":{ @@ -1833,7 +1849,7 @@ }, "ProvisioningArtifactParameters":{ "shape":"ProvisioningArtifactProperties", - "documentation":"

The configuration of the provisioning artifact.

" + "documentation":"

The configuration of the provisioning artifact. The info field accepts ImportFromPhysicalID.

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", @@ -1851,7 +1867,7 @@ }, "ProvisioningArtifactDetail":{ "shape":"ProvisioningArtifactDetail", - "documentation":"

Information about the provisioning artifact.

" + "documentation":"

Information about the provisioning artifact.

" }, "Tags":{ "shape":"Tags", @@ -1960,7 +1976,7 @@ }, "Parameters":{ "shape":"ProvisioningArtifactProperties", - "documentation":"

The configuration for the provisioning artifact.

" + "documentation":"

The configuration for the provisioning artifact. The info field accepts ImportFromPhysicalID.

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", @@ -2479,11 +2495,11 @@ }, "Id":{ "shape":"Id", - "documentation":"

The provisioned product identifier.

" + "documentation":"

The provisioned product identifier. You must provide the name or ID, but not both.

If you do not provide a name or ID, or you provide both name and ID, an InvalidParametersException will occur.

" }, "Name":{ "shape":"ProvisionedProductName", - "documentation":"

The name of the provisioned product.

" + "documentation":"

The name of the provisioned product. You must provide the name or ID, but not both.

If you do not provide a name or ID, or you provide both name and ID, an InvalidParametersException will occur.

" } }, "documentation":"DescribeProvisionedProductAPI input structure. AcceptLanguage - [Optional] The language code for localization. Id - [Optional] The provisioned product identifier. Name - [Optional] Another provisioned product identifier. Customers must provide either Id or Name." @@ -3147,6 +3163,49 @@ "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" }, "IgnoreErrors":{"type":"boolean"}, + "ImportAsProvisionedProductInput":{ + "type":"structure", + "required":[ + "ProductId", + "ProvisioningArtifactId", + "ProvisionedProductName", + "PhysicalId", + "IdempotencyToken" + ], + "members":{ + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "ProductId":{ + "shape":"Id", + "documentation":"

The product identifier.

" + }, + "ProvisioningArtifactId":{ + "shape":"Id", + "documentation":"

The identifier of the provisioning artifact.

" + }, + "ProvisionedProductName":{ + "shape":"ProvisionedProductName", + "documentation":"

The user-friendly name of the provisioned product. The value must be unique for the AWS account. The name cannot be updated after the product is provisioned.

" + }, + "PhysicalId":{ + "shape":"PhysicalId", + "documentation":"

The unique identifier of the resource to be imported. It currently supports only CloudFormation stack IDs.

" + }, + "IdempotencyToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you provide to ensure idempotency. If multiple requests differ only by the idempotency token, the same response is returned for each repeated request.

", + "idempotencyToken":true + } + } + }, + "ImportAsProvisionedProductOutput":{ + "type":"structure", + "members":{ + "RecordDetail":{"shape":"RecordDetail"} + } + }, "InstructionType":{"type":"string"}, "InstructionValue":{"type":"string"}, "InvalidParametersException":{ @@ -3421,7 +3480,7 @@ "documentation":"

The page token for the next set of results. To retrieve the first set of results, use null.

" }, "PageSize":{ - "shape":"PageSize", + "shape":"PageSizeMax100", "documentation":"

The maximum number of items to return with this call.

" } } @@ -3952,6 +4011,11 @@ "max":20, "min":0 }, + "PageSizeMax100":{ + "type":"integer", + "max":100, + "min":0 + }, "PageToken":{ "type":"string", "max":2024, @@ -4227,7 +4291,10 @@ }, "PropertyKey":{ "type":"string", - "enum":["OWNER"], + "enum":[ + "OWNER", + "LAUNCH_ROLE" + ], "max":128, "min":1 }, @@ -4235,7 +4302,7 @@ "PropertyValue":{ "type":"string", "max":1024, - "min":1 + "min":0 }, "ProviderName":{ "type":"string", @@ -4453,6 +4520,10 @@ "ProvisioningArtifactId":{ "shape":"Id", "documentation":"

The identifier of the provisioning artifact. For example, pa-4abcdjnxjj6ne.

" + }, + "LaunchRoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the launch role associated with the provisioned product.

" } }, "documentation":"

Information about a provisioned product.

" @@ -4980,6 +5051,10 @@ "RecordTags":{ "shape":"RecordTags", "documentation":"

One or more tags.

" + }, + "LaunchRoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the launch role associated with the provisioned product.

" } }, "documentation":"

Information about a request operation.

" @@ -5255,6 +5330,13 @@ "documentation":"

Information about a change to a resource attribute.

" }, "ResourceType":{"type":"string"}, + "RetainPhysicalResources":{"type":"boolean"}, + "RoleArn":{ + "type":"string", + "max":1224, + "min":1, + "pattern":"arn:[a-z0-9-\\.]{1,63}:iam::[a-z0-9-\\.]{0,63}:role\\/.{0,1023}" + }, "ScanProvisionedProductsInput":{ "type":"structure", "members":{ @@ -5857,6 +5939,10 @@ "AcceptLanguage":{ "shape":"AcceptLanguage", "documentation":"

The language code.

  • en - English (default)

  • jp - Japanese

  • zh - Chinese

" + }, + "RetainPhysicalResources":{ + "shape":"RetainPhysicalResources", + "documentation":"

When this boolean parameter is set to true, the TerminateProvisionedProduct API deletes the Service Catalog provisioned product. However, it does not remove the CloudFormation stack, stack set, or the underlying resources of the deleted provisioned product. The default value is false.
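A minimal sketch of terminating a provisioned product while keeping its underlying stack resources, using the generated Java client; the provisioned product ID is a placeholder:

    import software.amazon.awssdk.services.servicecatalog.ServiceCatalogClient;
    import software.amazon.awssdk.services.servicecatalog.model.TerminateProvisionedProductRequest;

    public class RetainResourcesSketch {
        public static void main(String[] args) {
            try (ServiceCatalogClient serviceCatalog = ServiceCatalogClient.create()) {
                // Removes the provisioned product record but leaves the CloudFormation resources in place.
                serviceCatalog.terminateProvisionedProduct(TerminateProvisionedProductRequest.builder()
                        .provisionedProductId("pp-examplexyz")
                        .retainPhysicalResources(true)
                        .build());
            }
        }
    }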

" } } }, @@ -6105,7 +6191,7 @@ }, "ProvisionedProductProperties":{ "shape":"ProvisionedProductProperties", - "documentation":"

A map that contains the provisioned product properties to be updated.

The OWNER key accepts user ARNs and role ARNs. The owner is the user that is allowed to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when he was an owner using ListRecordHistory.

If a provisioned product ownership is assigned to an end user, they can see and perform any action through the API or Service Catalog console such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through API or the Service Catalog console on that provisioned product.

" + "documentation":"

A map that contains the provisioned product properties to be updated.

The LAUNCH_ROLE key accepts role ARNs. This key allows an administrator to call UpdateProvisionedProductProperties to update the launch role that is associated with a provisioned product. This role is used when an end user calls a provisioning operation such as UpdateProvisionedProduct, TerminateProvisionedProduct, or ExecuteProvisionedProductServiceAction. Only a role ARN is valid. A user ARN is invalid.

The OWNER key accepts user ARNs and role ARNs. The owner is the user that has permission to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when they were the owner using ListRecordHistory.

If a provisioned product ownership is assigned to an end user, they can see and perform any action through the API or Service Catalog console, such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through the API or the Service Catalog console on that provisioned product.
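A rough sketch of changing the launch role through this property map with the generated Java client; the enum-keyed map overload shown is an assumption about the generated builder, and the IDs and role ARN are placeholders:

    import java.util.Map;

    import software.amazon.awssdk.services.servicecatalog.ServiceCatalogClient;
    import software.amazon.awssdk.services.servicecatalog.model.PropertyKey;
    import software.amazon.awssdk.services.servicecatalog.model.UpdateProvisionedProductPropertiesRequest;

    public class LaunchRoleSketch {
        public static void main(String[] args) {
            try (ServiceCatalogClient serviceCatalog = ServiceCatalogClient.create()) {
                serviceCatalog.updateProvisionedProductProperties(
                        UpdateProvisionedProductPropertiesRequest.builder()
                                .provisionedProductId("pp-examplexyz")
                                // Only a role ARN is valid for LAUNCH_ROLE; a user ARN is rejected.
                                .provisionedProductProperties(Map.of(
                                        PropertyKey.LAUNCH_ROLE,
                                        "arn:aws:iam::111122223333:role/SCLaunchRole"))
                                .build());
            }
        }
    }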

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", @@ -6331,5 +6417,5 @@ "UserArnSession":{"type":"string"}, "Verbose":{"type":"boolean"} }, - "documentation":"AWS Service Catalog

AWS Service Catalog enables organizations to create and manage catalogs of IT services that are approved for use on AWS. To get the most out of this documentation, you should be familiar with the terminology discussed in AWS Service Catalog Concepts.

" + "documentation":"AWS Service Catalog

AWS Service Catalog enables organizations to create and manage catalogs of IT services that are approved for AWS. To get the most out of this documentation, you should be familiar with the terminology discussed in AWS Service Catalog Concepts.

" } diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml new file mode 100644 index 000000000000..c5e161d4c0fa --- /dev/null +++ b/services/servicecatalogappregistry/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.15.40-SNAPSHOT + + servicecatalogappregistry + AWS Java SDK :: Services :: Service Catalog App Registry + The AWS Java SDK for Service Catalog App Registry module holds the client classes that are used for + communicating with Service Catalog App Registry. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.servicecatalogappregistry + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/servicecatalogappregistry/src/main/resources/codegen-resources/paginators-1.json b/services/servicecatalogappregistry/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..8c9a3bd7c9f8 --- /dev/null +++ b/services/servicecatalogappregistry/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListApplications": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "applications" + }, + "ListAssociatedAttributeGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "attributeGroups" + }, + "ListAssociatedResources": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "resources" + }, + "ListAttributeGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "attributeGroups" + } + } +} diff --git a/services/servicecatalogappregistry/src/main/resources/codegen-resources/service-2.json b/services/servicecatalogappregistry/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f2c6d677ffa5 --- /dev/null +++ b/services/servicecatalogappregistry/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1146 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-06-24", + "endpointPrefix":"servicecatalog-appregistry", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AppRegistry", + "serviceFullName":"AWS Service Catalog App Registry", + "serviceId":"Service Catalog AppRegistry", + "signatureVersion":"v4", + "signingName":"servicecatalog", + "uid":"AWS242AppRegistry-2020-06-24" + }, + "operations":{ + "AssociateAttributeGroup":{ + "name":"AssociateAttributeGroup", + "http":{ + "method":"PUT", + "requestUri":"/applications/{application}/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"AssociateAttributeGroupRequest"}, + "output":{"shape":"AssociateAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Associates an attribute group with an application to augment the application's metadata with the group's attributes. This feature enables applications to be described with user-defined details that are machine-readable, such as third-party integrations.

" + }, + "AssociateResource":{ + "name":"AssociateResource", + "http":{ + "method":"PUT", + "requestUri":"/applications/{application}/resources/{resourceType}/{resource}" + }, + "input":{"shape":"AssociateResourceRequest"}, + "output":{"shape":"AssociateResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Associates a resource with an application. Both the resource and the application can be specified either by ID or name.

" + }, + "CreateApplication":{ + "name":"CreateApplication", + "http":{ + "method":"POST", + "requestUri":"/applications", + "responseCode":201 + }, + "input":{"shape":"CreateApplicationRequest"}, + "output":{"shape":"CreateApplicationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a new application that is the top-level node in a hierarchy of related cloud resource abstractions.

" + }, + "CreateAttributeGroup":{ + "name":"CreateAttributeGroup", + "http":{ + "method":"POST", + "requestUri":"/attribute-groups", + "responseCode":201 + }, + "input":{"shape":"CreateAttributeGroupRequest"}, + "output":{"shape":"CreateAttributeGroupResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a new attribute group as a container for user-defined attributes. This feature enables users to have full control over their cloud application's metadata in a rich machine-readable format to facilitate integration with automated workflows and third-party tools.
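A minimal sketch of the create-and-associate flow, assuming the client generated for this model is named ServiceCatalogAppRegistryClient (derived from the serviceId above); the names and attributes are placeholders, and the required clientToken is assumed to be auto-filled by the SDK:

    import software.amazon.awssdk.services.servicecatalogappregistry.ServiceCatalogAppRegistryClient;
    import software.amazon.awssdk.services.servicecatalogappregistry.model.AssociateAttributeGroupRequest;
    import software.amazon.awssdk.services.servicecatalogappregistry.model.CreateApplicationRequest;
    import software.amazon.awssdk.services.servicecatalogappregistry.model.CreateAttributeGroupRequest;

    public class AppRegistrySketch {
        public static void main(String[] args) {
            try (ServiceCatalogAppRegistryClient appRegistry = ServiceCatalogAppRegistryClient.create()) {
                // Create the application.
                String appName = appRegistry.createApplication(CreateApplicationRequest.builder()
                        .name("payments-service")
                        .description("Example application")
                        .build()).application().name();

                // Create an attribute group whose attributes are a JSON document.
                String groupName = appRegistry.createAttributeGroup(CreateAttributeGroupRequest.builder()
                        .name("payments-metadata")
                        .attributes("{\"costCenter\":\"1234\",\"tier\":\"gold\"}")
                        .build()).attributeGroup().name();

                // Attach the group's attributes to the application (both specified here by name).
                appRegistry.associateAttributeGroup(AssociateAttributeGroupRequest.builder()
                        .application(appName)
                        .attributeGroup(groupName)
                        .build());
            }
        }
    }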

" + }, + "DeleteApplication":{ + "name":"DeleteApplication", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{application}" + }, + "input":{"shape":"DeleteApplicationRequest"}, + "output":{"shape":"DeleteApplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an application that is specified either by its application ID or name. All associated attribute groups and resources must be disassociated from the application before it can be deleted.

" + }, + "DeleteAttributeGroup":{ + "name":"DeleteAttributeGroup", + "http":{ + "method":"DELETE", + "requestUri":"/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"DeleteAttributeGroupRequest"}, + "output":{"shape":"DeleteAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an attribute group, specified either by its attribute group ID or name.

" + }, + "DisassociateAttributeGroup":{ + "name":"DisassociateAttributeGroup", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{application}/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"DisassociateAttributeGroupRequest"}, + "output":{"shape":"DisassociateAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Disassociates an attribute group from an application to remove the extra attributes contained in the attribute group from the application's metadata. This operation reverts AssociateAttributeGroup.

" + }, + "DisassociateResource":{ + "name":"DisassociateResource", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{application}/resources/{resourceType}/{resource}" + }, + "input":{"shape":"DisassociateResourceRequest"}, + "output":{"shape":"DisassociateResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Disassociates a resource from an application. Both the resource and the application can be specified either by ID or name.

" + }, + "GetApplication":{ + "name":"GetApplication", + "http":{ + "method":"GET", + "requestUri":"/applications/{application}" + }, + "input":{"shape":"GetApplicationRequest"}, + "output":{"shape":"GetApplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves metadata information about one of your applications. The application can be specified either by its unique ID or by its name (which is unique within one account in one region at a given point in time). Specify by ID in automated workflows if you want to make sure that the exact same application is returned or a ResourceNotFoundException is thrown, avoiding the ABA addressing problem.

" + }, + "GetAttributeGroup":{ + "name":"GetAttributeGroup", + "http":{ + "method":"GET", + "requestUri":"/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"GetAttributeGroupRequest"}, + "output":{"shape":"GetAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves an attribute group, specified either by its unique ID or by its name.

" + }, + "ListApplications":{ + "name":"ListApplications", + "http":{ + "method":"GET", + "requestUri":"/applications" + }, + "input":{"shape":"ListApplicationsRequest"}, + "output":{"shape":"ListApplicationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves a list of all of your applications. Results are paginated.
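A short sketch of listing applications through the paginator declared in the paginators file for this service; the paginator method name is assumed to follow the standard listApplicationsPaginator convention:

    import software.amazon.awssdk.services.servicecatalogappregistry.ServiceCatalogAppRegistryClient;
    import software.amazon.awssdk.services.servicecatalogappregistry.model.ApplicationSummary;
    import software.amazon.awssdk.services.servicecatalogappregistry.model.ListApplicationsResponse;

    public class ListApplicationsSketch {
        public static void main(String[] args) {
            try (ServiceCatalogAppRegistryClient appRegistry = ServiceCatalogAppRegistryClient.create()) {
                // The paginator follows nextToken across pages automatically.
                for (ListApplicationsResponse page : appRegistry.listApplicationsPaginator()) {
                    for (ApplicationSummary app : page.applications()) {
                        System.out.println(app.id() + " " + app.name());
                    }
                }
            }
        }
    }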

", + "idempotent":true + }, + "ListAssociatedAttributeGroups":{ + "name":"ListAssociatedAttributeGroups", + "http":{ + "method":"GET", + "requestUri":"/applications/{application}/attribute-groups" + }, + "input":{"shape":"ListAssociatedAttributeGroupsRequest"}, + "output":{"shape":"ListAssociatedAttributeGroupsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all attribute groups that are associated with the specified application. Results are paginated.

", + "idempotent":true + }, + "ListAssociatedResources":{ + "name":"ListAssociatedResources", + "http":{ + "method":"GET", + "requestUri":"/applications/{application}/resources" + }, + "input":{"shape":"ListAssociatedResourcesRequest"}, + "output":{"shape":"ListAssociatedResourcesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all resources that are associated with the specified application. Results are paginated.

", + "idempotent":true + }, + "ListAttributeGroups":{ + "name":"ListAttributeGroups", + "http":{ + "method":"GET", + "requestUri":"/attribute-groups" + }, + "input":{"shape":"ListAttributeGroupsRequest"}, + "output":{"shape":"ListAttributeGroupsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all attribute groups to which you have access. Results are paginated.

", + "idempotent":true + }, + "UpdateApplication":{ + "name":"UpdateApplication", + "http":{ + "method":"PATCH", + "requestUri":"/applications/{application}" + }, + "input":{"shape":"UpdateApplicationRequest"}, + "output":{"shape":"UpdateApplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates an existing application with new attributes.

" + }, + "UpdateAttributeGroup":{ + "name":"UpdateAttributeGroup", + "http":{ + "method":"PATCH", + "requestUri":"/attribute-groups/{attributeGroup}" + }, + "input":{"shape":"UpdateAttributeGroupRequest"}, + "output":{"shape":"UpdateAttributeGroupResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates an existing attribute group with new details.

" + } + }, + "shapes":{ + "Application":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ApplicationId", + "documentation":"

The identifier of the application.

" + }, + "arn":{ + "shape":"ApplicationArn", + "documentation":"

The Amazon resource name (ARN) that specifies the application across services.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the application. The name must be unique in the region in which you are creating the application.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the application.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment when the application was created.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment when the application was last updated.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs you can use to associate with the application.

" + } + }, + "documentation":"

Represents a Service Catalog AppRegistry application that is the top-level node in a hierarchy of related cloud resource abstractions.

" + }, + "ApplicationArn":{ + "type":"string", + "pattern":"arn:aws[-a-z]*:servicecatalog:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:/applications/[a-z0-9]+" + }, + "ApplicationId":{ + "type":"string", + "pattern":"[a-z0-9]{12}" + }, + "ApplicationSpecifier":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\w+" + }, + "ApplicationSummaries":{ + "type":"list", + "member":{"shape":"ApplicationSummary"} + }, + "ApplicationSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ApplicationId", + "documentation":"

The identifier of the application.

" + }, + "arn":{ + "shape":"ApplicationArn", + "documentation":"

The Amazon resource name (ARN) that specifies the application across services.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the application. The name must be unique in the region in which you are creating the application.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the application.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment when the application was created.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment when the application was last updated.

" + } + }, + "documentation":"

Summary of a Service Catalog AppRegistry application.

" + }, + "Arn":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" + }, + "AssociateAttributeGroupRequest":{ + "type":"structure", + "required":[ + "application", + "attributeGroup" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + }, + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

The name or ID of the attribute group that holds the attributes to describe the application.

", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "AssociateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

The Amazon resource name (ARN) of the application that was augmented with attributes.

" + }, + "attributeGroupArn":{ + "shape":"AttributeGroupArn", + "documentation":"

The Amazon resource name (ARN) of the attribute group that contains the application's new attributes.

" + } + } + }, + "AssociateResourceRequest":{ + "type":"structure", + "required":[ + "application", + "resourceType", + "resource" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource with which the application will be associated.

", + "location":"uri", + "locationName":"resourceType" + }, + "resource":{ + "shape":"ResourceSpecifier", + "documentation":"

The name or ID of the resource with which the application will be associated.

", + "location":"uri", + "locationName":"resource" + } + } + }, + "AssociateResourceResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

The Amazon resource name (ARN) of the application that was augmented with attributes.

" + }, + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon resource name (ARN) that specifies the resource.

" + } + } + }, + "AssociationCount":{ + "type":"integer", + "min":0 + }, + "AttributeGroup":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AttributeGroupId", + "documentation":"

The globally unique attribute group identifier of the attribute group.

" + }, + "arn":{ + "shape":"AttributeGroupArn", + "documentation":"

The Amazon resource name (ARN) that specifies the attribute group across services.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the attribute group.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the attribute group that the user provides.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment the attribute group was created.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment the attribute group was last updated. This time is the same as the creationTime for a newly created attribute group.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs you can use to associate with the attribute group.

" + } + }, + "documentation":"

Represents a Service Catalog AppRegistry attribute group that is rich metadata which describes an application and its components.

" + }, + "AttributeGroupArn":{ + "type":"string", + "pattern":"arn:aws[-a-z]*:servicecatalog:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:/attribute-groups/[a-z0-9]+" + }, + "AttributeGroupId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-z0-9]{12}" + }, + "AttributeGroupIds":{ + "type":"list", + "member":{"shape":"AttributeGroupId"} + }, + "AttributeGroupSpecifier":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\w+" + }, + "AttributeGroupSummaries":{ + "type":"list", + "member":{"shape":"AttributeGroupSummary"} + }, + "AttributeGroupSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AttributeGroupId", + "documentation":"

The globally unique attribute group identifier of the attribute group.

" + }, + "arn":{ + "shape":"AttributeGroupArn", + "documentation":"

The Amazon resource name (ARN) that specifies the attribute group across services.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the attribute group.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the attribute group that the user provides.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment the attribute group was created.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment the attribute group was last updated. This time is the same as the creationTime for a newly created attribute group.

" + } + }, + "documentation":"

Summary of a Service Catalog AppRegistry attribute group.

" + }, + "Attributes":{ + "type":"string", + "max":8000, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "ClientToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

There was a conflict when processing the request (for example, a resource with the given name already exists within the account).

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateApplicationRequest":{ + "type":"structure", + "required":[ + "name", + "clientToken" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the application. The name must be unique in the region in which you are creating the application.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the application.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs you can use to associate with the application.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier that you provide to ensure idempotency. If you retry a request that completed successfully using the same client token and the same parameters, the retry succeeds without performing any further actions. If you retry a successful request using the same client token, but one or more of the parameters are different, the retry fails.

", + "idempotencyToken":true + } + } + }, + "CreateApplicationResponse":{ + "type":"structure", + "members":{ + "application":{ + "shape":"Application", + "documentation":"

Information about the application.

" + } + } + }, + "CreateAttributeGroupRequest":{ + "type":"structure", + "required":[ + "name", + "attributes", + "clientToken" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the attribute group.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the attribute group that the user provides.

" + }, + "attributes":{ + "shape":"Attributes", + "documentation":"

A JSON string in the form of nested key-value pairs that represent the attributes in the group and describes an application and its components.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs you can use to associate with the attribute group.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier that you provide to ensure idempotency. If you retry a request that completed successfully using the same client token and the same parameters, the retry succeeds without performing any further actions. If you retry a successful request using the same client token, but one or more of the parameters are different, the retry fails.

", + "idempotencyToken":true + } + } + }, + "CreateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "attributeGroup":{ + "shape":"AttributeGroup", + "documentation":"

Information about the attribute group.

" + } + } + }, + "DeleteApplicationRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + } + } + }, + "DeleteApplicationResponse":{ + "type":"structure", + "members":{ + "application":{ + "shape":"ApplicationSummary", + "documentation":"

Information about the deleted application.

" + } + } + }, + "DeleteAttributeGroupRequest":{ + "type":"structure", + "required":["attributeGroup"], + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

The name or ID of the attribute group that holds the attributes to describe the application.

", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "DeleteAttributeGroupResponse":{ + "type":"structure", + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSummary", + "documentation":"

Information about the deleted attribute group.

" + } + } + }, + "Description":{ + "type":"string", + "max":1024 + }, + "DisassociateAttributeGroupRequest":{ + "type":"structure", + "required":[ + "application", + "attributeGroup" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + }, + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

The name or ID of the attribute group that holds the attributes to describe the application.

", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "DisassociateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

The Amazon resource name (ARN) that specifies the application.

" + }, + "attributeGroupArn":{ + "shape":"AttributeGroupArn", + "documentation":"

The Amazon resource name (ARN) that specifies the attribute group.

" + } + } + }, + "DisassociateResourceRequest":{ + "type":"structure", + "required":[ + "application", + "resourceType", + "resource" + ], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource that is being disassociated.

", + "location":"uri", + "locationName":"resourceType" + }, + "resource":{ + "shape":"ResourceSpecifier", + "documentation":"

The name or ID of the resource.

", + "location":"uri", + "locationName":"resource" + } + } + }, + "DisassociateResourceResponse":{ + "type":"structure", + "members":{ + "applicationArn":{ + "shape":"ApplicationArn", + "documentation":"

The Amazon resource name (ARN) that specifies the application.

" + }, + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon resource name (ARN) that specifies the resource.

" + } + } + }, + "GetApplicationRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + } + } + }, + "GetApplicationResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ApplicationId", + "documentation":"

The identifier of the application.

" + }, + "arn":{ + "shape":"ApplicationArn", + "documentation":"

The Amazon resource name (ARN) that specifies the application across services.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the application. The name must be unique in the region in which you are creating the application.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the application.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment when the application was created.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment when the application was last updated.

" + }, + "associatedResourceCount":{ + "shape":"AssociationCount", + "documentation":"

The number of top-level resources that were registered as part of this application.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs associated with the application.

" + } + } + }, + "GetAttributeGroupRequest":{ + "type":"structure", + "required":["attributeGroup"], + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

The name or ID of the attribute group that holds the attributes to describe the application.

", + "location":"uri", + "locationName":"attributeGroup" + } + } + }, + "GetAttributeGroupResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"AttributeGroupId", + "documentation":"

The identifier of the attribute group.

" + }, + "arn":{ + "shape":"AttributeGroupArn", + "documentation":"

The Amazon resource name (ARN) that specifies the attribute group across services.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the attribute group.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the attribute group that the user provides.

" + }, + "attributes":{ + "shape":"Attributes", + "documentation":"

A JSON string in the form of nested key-value pairs that represent the attributes in the group and describe an application and its components.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment the attribute group was created.

" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The ISO-8601 formatted timestamp of the moment the attribute group was last updated. This time is the same as the creationTime for a newly created attribute group.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Key-value pairs associated with the attribute group.

" + } + } + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The service is experiencing internal problems.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListApplicationsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListApplicationsResponse":{ + "type":"structure", + "members":{ + "applications":{ + "shape":"ApplicationSummaries", + "documentation":"

The list of applications.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

" + } + } + }, + "ListAssociatedAttributeGroupsRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssociatedAttributeGroupsResponse":{ + "type":"structure", + "members":{ + "attributeGroups":{ + "shape":"AttributeGroupIds", + "documentation":"

A list of attribute group IDs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

" + } + } + }, + "ListAssociatedResourcesRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application.

", + "location":"uri", + "locationName":"application" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssociatedResourcesResponse":{ + "type":"structure", + "members":{ + "resources":{ + "shape":"Resources", + "documentation":"

Information about the resources.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

" + } + } + }, + "ListAttributeGroupsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The upper bound of the number of results to return (cannot exceed 25). If this parameter is omitted, it defaults to 25. This value is optional.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAttributeGroupsResponse":{ + "type":"structure", + "members":{ + "attributeGroups":{ + "shape":"AttributeGroupSummaries", + "documentation":"

The list of attribute groups.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to get the next page of results after a previous API call.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":25, + "min":1 + }, + "Name":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\w+" + }, + "NextToken":{ + "type":"string", + "max":2024, + "min":1, + "pattern":"[A-Za-z0-9+/=]+" + }, + "ResourceInfo":{ + "type":"structure", + "members":{ + "name":{ + "shape":"ResourceSpecifier", + "documentation":"

The name of the resource.

" + }, + "arn":{ + "shape":"StackArn", + "documentation":"

The Amazon resource name (ARN) that specifies the resource across services.

" + } + }, + "documentation":"

Information about the resource.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The specified resource does not exist.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceSpecifier":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\S+" + }, + "ResourceType":{ + "type":"string", + "enum":["CFN_STACK"] + }, + "Resources":{ + "type":"list", + "member":{"shape":"ResourceInfo"} + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The maximum number of resources per account has been reached.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "StackArn":{ + "type":"string", + "pattern":"arn:aws[-a-z]*:cloudformation:[a-z]{2}(-gov)?-[a-z]+-\\d:\\d{12}:stack/[a-zA-Z][-A-Za-z0-9]{0,127}/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}" + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"(?!aws:)[a-zA-Z+-=._:/]+" + }, + "TagValue":{ + "type":"string", + "max":256, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*" + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "Timestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "UpdateApplicationRequest":{ + "type":"structure", + "required":["application"], + "members":{ + "application":{ + "shape":"ApplicationSpecifier", + "documentation":"

The name or ID of the application that will be updated.

", + "location":"uri", + "locationName":"application" + }, + "name":{ + "shape":"Name", + "documentation":"

The new name of the application. The name must be unique in the region in which you are updating the application.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The new description of the application.

" + } + } + }, + "UpdateApplicationResponse":{ + "type":"structure", + "members":{ + "application":{ + "shape":"Application", + "documentation":"

The updated information of the application.

" + } + } + }, + "UpdateAttributeGroupRequest":{ + "type":"structure", + "required":["attributeGroup"], + "members":{ + "attributeGroup":{ + "shape":"AttributeGroupSpecifier", + "documentation":"

The name or ID of the attribute group that holds the attributes to describe the application.

", + "location":"uri", + "locationName":"attributeGroup" + }, + "name":{ + "shape":"Name", + "documentation":"

The new name of the attribute group. The name must be unique in the region in which you are updating the attribute group.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the attribute group that the user provides.

" + }, + "attributes":{ + "shape":"Attributes", + "documentation":"

A JSON string in the form of nested key-value pairs that represent the attributes in the group and describe an application and its components.

" + } + } + }, + "UpdateAttributeGroupResponse":{ + "type":"structure", + "members":{ + "attributeGroup":{ + "shape":"AttributeGroup", + "documentation":"

The updated information of the attribute group.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request has invalid or missing parameters.

", + "error":{"httpStatusCode":400}, + "exception":true + } + }, + "documentation":"

AWS Service Catalog AppRegistry enables organizations to understand the application context of their AWS resources. AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise.
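For reference, the request and response shapes above (ListApplicationsRequest with nextToken and maxResults, ApplicationSummaries, and so on) produce a synchronous client through the SDK's usual codegen. A minimal sketch of paging through applications; the client class and package name (ServiceCatalogAppRegistryClient in servicecatalogappregistry) and the ApplicationSummary accessors are inferred from SDK naming conventions rather than shown in this diff:

import software.amazon.awssdk.services.servicecatalogappregistry.ServiceCatalogAppRegistryClient;
import software.amazon.awssdk.services.servicecatalogappregistry.model.ApplicationSummary;
import software.amazon.awssdk.services.servicecatalogappregistry.model.ListApplicationsRequest;
import software.amazon.awssdk.services.servicecatalogappregistry.model.ListApplicationsResponse;

public class ListAppRegistryApplications {
    public static void main(String[] args) {
        try (ServiceCatalogAppRegistryClient appRegistry = ServiceCatalogAppRegistryClient.create()) {
            String nextToken = null;
            do {
                // maxResults is capped at 25 by the MaxResults shape above.
                ListApplicationsResponse page = appRegistry.listApplications(
                        ListApplicationsRequest.builder()
                                .maxResults(25)
                                .nextToken(nextToken)
                                .build());
                for (ApplicationSummary summary : page.applications()) {
                    System.out.println(summary.name() + " -> " + summary.arn());
                }
                nextToken = page.nextToken();
            } while (nextToken != null);
        }
    }
}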

" +} diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 780081529374..ba4cfaf6151c 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index 6de800a4ac36..14364ad48db1 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index 26ec90ac48a4..768f92920256 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index e994da46667f..aba26254b0a8 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sesv2/src/main/resources/codegen-resources/paginators-1.json b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json index 6acdcfdb4384..3d39be48f44a 100644 --- a/services/sesv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json @@ -10,6 +10,16 @@ "output_token": "NextToken", "limit_key": "PageSize" }, + "ListContactLists": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListContacts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, "ListCustomVerificationEmailTemplates": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/sesv2/src/main/resources/codegen-resources/service-2.json b/services/sesv2/src/main/resources/codegen-resources/service-2.json index 216e43cf4cc4..f165ed3bb91b 100644 --- a/services/sesv2/src/main/resources/codegen-resources/service-2.json +++ b/services/sesv2/src/main/resources/codegen-resources/service-2.json @@ -48,6 +48,38 @@ ], "documentation":"

Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

A single configuration set can include more than one event destination.

" }, + "CreateContact":{ + "name":"CreateContact", + "http":{ + "method":"POST", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts" + }, + "input":{"shape":"CreateContactRequest"}, + "output":{"shape":"CreateContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"AlreadyExistsException"} + ], + "documentation":"

Creates a contact, which is an end-user who is receiving the email, and adds them to a contact list.

" + }, + "CreateContactList":{ + "name":"CreateContactList", + "http":{ + "method":"POST", + "requestUri":"/v2/email/contact-lists" + }, + "input":{"shape":"CreateContactListRequest"}, + "output":{"shape":"CreateContactListResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a contact list.
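Taken together, the CreateContactList and CreateContact operations above map to createContactList and createContact methods on the generated SesV2Client. A minimal sketch of how the new shapes compose, assuming the usual generated builder surface; the list name, topic name, and email address are placeholders:

import software.amazon.awssdk.services.sesv2.SesV2Client;
import software.amazon.awssdk.services.sesv2.model.CreateContactListRequest;
import software.amazon.awssdk.services.sesv2.model.CreateContactRequest;
import software.amazon.awssdk.services.sesv2.model.SubscriptionStatus;
import software.amazon.awssdk.services.sesv2.model.Topic;
import software.amazon.awssdk.services.sesv2.model.TopicPreference;

public class CreateContactListAndContact {
    public static void main(String[] args) {
        try (SesV2Client ses = SesV2Client.create()) {
            // Create a contact list with one topic; DefaultSubscriptionStatus is required on each Topic.
            ses.createContactList(CreateContactListRequest.builder()
                    .contactListName("weekly-newsletter")
                    .topics(Topic.builder()
                            .topicName("product-updates")
                            .displayName("Product updates")
                            .defaultSubscriptionStatus(SubscriptionStatus.OPT_IN)
                            .build())
                    .build());

            // Add a contact and opt it in to that topic.
            ses.createContact(CreateContactRequest.builder()
                    .contactListName("weekly-newsletter")
                    .emailAddress("recipient@example.com")
                    .topicPreferences(TopicPreference.builder()
                            .topicName("product-updates")
                            .subscriptionStatus(SubscriptionStatus.OPT_IN)
                            .build())
                    .build());
        }
    }
}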

" + }, "CreateCustomVerificationEmailTemplate":{ "name":"CreateCustomVerificationEmailTemplate", "http":{ @@ -199,6 +231,37 @@ ], "documentation":"

Delete an event destination.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" }, + "DeleteContact":{ + "name":"DeleteContact", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts/{EmailAddress}" + }, + "input":{"shape":"DeleteContactRequest"}, + "output":{"shape":"DeleteContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Removes a contact from a contact list.

" + }, + "DeleteContactList":{ + "name":"DeleteContactList", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/contact-lists/{ContactListName}" + }, + "input":{"shape":"DeleteContactListRequest"}, + "output":{"shape":"DeleteContactListResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Deletes a contact list and all of the contacts on that list.

" + }, "DeleteCustomVerificationEmailTemplate":{ "name":"DeleteCustomVerificationEmailTemplate", "http":{ @@ -350,6 +413,36 @@ ], "documentation":"

Retrieve a list of event destinations that are associated with a configuration set.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" }, + "GetContact":{ + "name":"GetContact", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts/{EmailAddress}" + }, + "input":{"shape":"GetContactRequest"}, + "output":{"shape":"GetContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Returns a contact from a contact list.

" + }, + "GetContactList":{ + "name":"GetContactList", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists/{ContactListName}" + }, + "input":{"shape":"GetContactListRequest"}, + "output":{"shape":"GetContactListResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Returns contact list metadata. It does not return any information about the contacts present in the list.

" + }, "GetCustomVerificationEmailTemplate":{ "name":"GetCustomVerificationEmailTemplate", "http":{ @@ -544,6 +637,35 @@ ], "documentation":"

List all of the configuration sets associated with your account in the current region.

Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

" }, + "ListContactLists":{ + "name":"ListContactLists", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists" + }, + "input":{"shape":"ListContactListsRequest"}, + "output":{"shape":"ListContactListsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Lists all of the contact lists available.

" + }, + "ListContacts":{ + "name":"ListContacts", + "http":{ + "method":"GET", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts" + }, + "input":{"shape":"ListContactsRequest"}, + "output":{"shape":"ListContactsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Lists the contacts present in a specific contact list.
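Because ListContacts and ListContactLists are also registered in paginators-1.json above, the generated client should additionally expose listContactsPaginator alongside the plain listContacts call. A sketch of paging through the opted-in contacts of a topic, assuming that generated paginator and the ListContactsFilter and TopicFilter shapes defined later in this model:

import software.amazon.awssdk.services.sesv2.SesV2Client;
import software.amazon.awssdk.services.sesv2.model.ListContactsFilter;
import software.amazon.awssdk.services.sesv2.model.ListContactsRequest;
import software.amazon.awssdk.services.sesv2.model.SubscriptionStatus;
import software.amazon.awssdk.services.sesv2.model.TopicFilter;

public class ListOptedInContacts {
    public static void main(String[] args) {
        try (SesV2Client ses = SesV2Client.create()) {
            ListContactsRequest request = ListContactsRequest.builder()
                    .contactListName("weekly-newsletter")
                    .filter(ListContactsFilter.builder()
                            .filteredStatus(SubscriptionStatus.OPT_IN)
                            .topicFilter(TopicFilter.builder()
                                    .topicName("product-updates")
                                    .useDefaultIfPreferenceUnavailable(true)
                                    .build())
                            .build())
                    .build();
            // The paginator handles the NextToken/PageSize exchange; each iteration is one response page.
            ses.listContactsPaginator(request)
                    .forEach(page -> page.contacts()
                            .forEach(contact -> System.out.println(contact.emailAddress())));
        }
    }
}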

" + }, "ListCustomVerificationEmailTemplates":{ "name":"ListCustomVerificationEmailTemplates", "http":{ @@ -1048,6 +1170,38 @@ ], "documentation":"

Update the configuration of an event destination for a configuration set.

Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" }, + "UpdateContact":{ + "name":"UpdateContact", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/contact-lists/{ContactListName}/contacts/{EmailAddress}" + }, + "input":{"shape":"UpdateContactRequest"}, + "output":{"shape":"UpdateContactResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Updates a contact's preferences for a list. It is not necessary to specify all existing topic preferences in the TopicPreferences object, just the ones that need updating.
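A small sketch of that partial update, assuming the generated updateContact method on SesV2Client; only the topic preference being changed is supplied, as the description above allows:

import software.amazon.awssdk.services.sesv2.SesV2Client;
import software.amazon.awssdk.services.sesv2.model.SubscriptionStatus;
import software.amazon.awssdk.services.sesv2.model.TopicPreference;
import software.amazon.awssdk.services.sesv2.model.UpdateContactRequest;

public class OptOutOfTopic {
    public static void main(String[] args) {
        try (SesV2Client ses = SesV2Client.create()) {
            // Only the changed preference is sent; other existing topic preferences need not be repeated.
            ses.updateContact(UpdateContactRequest.builder()
                    .contactListName("weekly-newsletter")
                    .emailAddress("recipient@example.com")
                    .topicPreferences(TopicPreference.builder()
                            .topicName("product-updates")
                            .subscriptionStatus(SubscriptionStatus.OPT_OUT)
                            .build())
                    .build());
        }
    }
}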

" + }, + "UpdateContactList":{ + "name":"UpdateContactList", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/contact-lists/{ContactListName}" + }, + "input":{"shape":"UpdateContactListRequest"}, + "output":{"shape":"UpdateContactListResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Updates contact list metadata. This operation does a complete replacement.

" + }, "UpdateCustomVerificationEmailTemplate":{ "name":"UpdateCustomVerificationEmailTemplate", "http":{ @@ -1156,6 +1310,7 @@ "exception":true }, "AmazonResourceName":{"type":"string"}, + "AttributesData":{"type":"string"}, "BadRequestException":{ "type":"structure", "members":{ @@ -1366,6 +1521,32 @@ "error":{"httpStatusCode":409}, "exception":true }, + "Contact":{ + "type":"structure", + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The contact's email address.

" + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

The contact's preference for being opted-in to or opted-out of a topic.

" + }, + "TopicDefaultPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

The default topic preferences applied to the contact.

" + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

A boolean value indicating whether the contact is unsubscribed from all contact list topics.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

A timestamp noting the last time the contact's information was updated.

" + } + }, + "documentation":"

A contact is the end-user who is receiving the email.

" + }, "ContactLanguage":{ "type":"string", "enum":[ @@ -1373,6 +1554,46 @@ "JA" ] }, + "ContactList":{ + "type":"structure", + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

A timestamp noting the last time the contact list was updated.

" + } + }, + "documentation":"

A list that contains contacts that have subscribed to a particular topic or topics.

" + }, + "ContactListDestination":{ + "type":"structure", + "required":[ + "ContactListName", + "ContactListImportAction" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

" + }, + "ContactListImportAction":{ + "shape":"ContactListImportAction", + "documentation":"

The type of action that you want to perform on the addresses. Acceptable values:

  • PUT: add the addresses to the contact list. If the record already exists, it is overwritten with the new value.

  • DELETE: remove the addresses from the contact list.

" + } + }, + "documentation":"

An object that contains details about the action of a contact list.

" + }, + "ContactListImportAction":{ + "type":"string", + "enum":[ + "DELETE", + "PUT" + ] + }, + "ContactListName":{"type":"string"}, "Content":{ "type":"structure", "required":["Data"], @@ -1457,6 +1678,69 @@ }, "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" }, + "CreateContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

" + }, + "Topics":{ + "shape":"Topics", + "documentation":"

An interest group, theme, or label within a list. A contact list can have multiple topics.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of what the contact list is about.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags associated with a contact list.

" + } + } + }, + "CreateContactListResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list to which the contact should be added.

", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The contact's email address.

" + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

The contact's preferences for being opted-in to or opted-out of topics.

" + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

A boolean value indicating whether the contact is unsubscribed from all contact list topics.

" + }, + "AttributesData":{ + "shape":"AttributesData", + "documentation":"

The attribute data attached to a contact.

" + } + } + }, + "CreateContactResponse":{ + "type":"structure", + "members":{ + } + }, "CreateCustomVerificationEmailTemplateRequest":{ "type":"structure", "required":[ @@ -1836,6 +2120,49 @@ }, "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" }, + "DeleteContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

", + "location":"uri", + "locationName":"ContactListName" + } + } + }, + "DeleteContactListResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list from which the contact should be removed.

", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The contact's email address.

", + "location":"uri", + "locationName":"EmailAddress" + } + } + }, + "DeleteContactResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteCustomVerificationEmailTemplateRequest":{ "type":"structure", "required":["TemplateName"], @@ -2028,6 +2355,7 @@ }, "documentation":"

Used to associate a configuration set with a dedicated IP pool.

" }, + "Description":{"type":"string"}, "Destination":{ "type":"structure", "members":{ @@ -2059,6 +2387,7 @@ "LINK_TAG" ] }, + "DisplayName":{"type":"string"}, "DkimAttributes":{ "type":"structure", "members":{ @@ -2421,7 +2750,8 @@ "OPEN", "CLICK", "RENDERING_FAILURE", - "DELIVERY_DELAY" + "DELIVERY_DELAY", + "SUBSCRIPTION" ] }, "EventTypes":{ @@ -2584,6 +2914,105 @@ }, "documentation":"

Information about a configuration set.

" }, + "GetContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

", + "location":"uri", + "locationName":"ContactListName" + } + } + }, + "GetContactListResponse":{ + "type":"structure", + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

" + }, + "Topics":{ + "shape":"Topics", + "documentation":"

An interest group, theme, or label within a list. A contact list can have multiple topics.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of what the contact list is about.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

A timestamp noting when the contact list was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

A timestamp noting the last time the contact list was updated.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags associated with a contact list.

" + } + } + }, + "GetContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list to which the contact belongs.

", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The contact's email address.

", + "location":"uri", + "locationName":"EmailAddress" + } + } + }, + "GetContactResponse":{ + "type":"structure", + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list to which the contact belongs.

" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The contact's email address.

" + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

The contact's preference for being opted-in to or opted-out of a topic.

" + }, + "TopicDefaultPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

The default topic preferences applied to the contact.

" + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

A boolean value indicating whether the contact is unsubscribed from all contact list topics.

" + }, + "AttributesData":{ + "shape":"AttributesData", + "documentation":"

The attribute data attached to a contact.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

A timestamp noting when the contact was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

A timestamp noting the last time the contact's information was updated.

" + } + } + }, "GetCustomVerificationEmailTemplateRequest":{ "type":"structure", "required":["TemplateName"], @@ -3072,11 +3501,14 @@ }, "ImportDestination":{ "type":"structure", - "required":["SuppressionListDestination"], "members":{ "SuppressionListDestination":{ "shape":"SuppressionListDestination", "documentation":"

An object that contains the action of the import job towards suppression list.

" + }, + "ContactListDestination":{ + "shape":"ContactListDestination", + "documentation":"

An object that contains the action of the import job towards a contact list.

" } }, "documentation":"

An object that contains details about the resource destination the import job is going to target.

" @@ -3084,7 +3516,10 @@ "ImportDestinationType":{ "type":"string", "documentation":"

The destination of the import job, which can be used to list import jobs that have a certain ImportDestinationType.

", - "enum":["SUPPRESSION_LIST"] + "enum":[ + "SUPPRESSION_LIST", + "CONTACT_LIST" + ] }, "ImportJobSummary":{ "type":"structure", @@ -3092,7 +3527,10 @@ "JobId":{"shape":"JobId"}, "ImportDestination":{"shape":"ImportDestination"}, "JobStatus":{"shape":"JobStatus"}, - "CreatedTimestamp":{"shape":"Timestamp"} + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The date and time when the import job was created.

" + } }, "documentation":"

A summary of the import job.

" }, @@ -3234,6 +3672,91 @@ }, "documentation":"

A list of configuration sets in your Amazon SES account in the current AWS Region.

" }, + "ListContactListsRequest":{ + "type":"structure", + "members":{ + "PageSize":{ + "shape":"MaxItems", + "documentation":"

Maximum number of contact lists to return at once. Use this parameter to paginate results. If additional contact lists exist beyond the specified limit, the NextToken element is sent in the response. Use the NextToken value in subsequent requests to retrieve additional lists.

", + "location":"querystring", + "locationName":"PageSize" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string token indicating that there might be additional contact lists available to be listed. Use the token provided in the response in a subsequent call to ListContactLists with the same parameters to retrieve the next page of contact lists.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListContactListsResponse":{ + "type":"structure", + "members":{ + "ContactLists":{ + "shape":"ListOfContactLists", + "documentation":"

The available contact lists.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string token indicating that there might be additional contact lists available to be listed. Copy this token to a subsequent call to ListContactLists with the same parameters to retrieve the next page of contact lists.

" + } + } + }, + "ListContactsFilter":{ + "type":"structure", + "members":{ + "FilteredStatus":{ + "shape":"SubscriptionStatus", + "documentation":"

The status by which you are filtering: OPT_IN or OPT_OUT.

" + }, + "TopicFilter":{ + "shape":"TopicFilter", + "documentation":"

Used for filtering by a specific topic preference.

" + } + }, + "documentation":"

A filter that can be applied to a list of contacts.

" + }, + "ListContactsRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

", + "location":"uri", + "locationName":"ContactListName" + }, + "Filter":{ + "shape":"ListContactsFilter", + "documentation":"

A filter that can be applied to a list of contacts.

" + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

The maximum number of contacts to return at once. Use this parameter to paginate results. If additional contacts exist beyond the specified limit, the NextToken element is sent in the response. Use the NextToken value in subsequent requests to retrieve additional contacts.

", + "location":"querystring", + "locationName":"PageSize" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string token indicating that there might be additional contacts available to be listed. Use the token provided in the response in a subsequent call to ListContacts with the same parameters to retrieve the next page of contacts.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListContactsResponse":{ + "type":"structure", + "members":{ + "Contacts":{ + "shape":"ListOfContacts", + "documentation":"

The contacts present in a specific contact list.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string token indicating that there might be additional contacts available to be listed. Copy this token to a subsequent call to ListContacts with the same parameters to retrieve the next page of contacts.

" + } + } + }, "ListCustomVerificationEmailTemplatesRequest":{ "type":"structure", "members":{ @@ -3487,6 +4010,29 @@ }, "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" }, + "ListManagementOptions":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

" + }, + "TopicName":{ + "shape":"TopicName", + "documentation":"

The name of the topic.

" + } + }, + "documentation":"

An object used to specify a list or topic to which an email belongs, which will be used when a contact chooses to unsubscribe.

" + }, + "ListOfContactLists":{ + "type":"list", + "member":{"shape":"ContactList"} + }, + "ListOfContacts":{ + "type":"list", + "member":{"shape":"Contact"} + }, "ListOfDedicatedIpPools":{ "type":"list", "member":{"shape":"PoolName"}, @@ -4419,6 +4965,10 @@ "ConfigurationSetName":{ "shape":"ConfigurationSetName", "documentation":"

The name of the configuration set that you want to use when sending the email.

" + }, + "ListManagementOptions":{ + "shape":"ListManagementOptions", + "documentation":"

An object used to specify a list or topic to which an email belongs, which will be used when a contact chooses to unsubscribe.

" } }, "documentation":"

Represents a request to send a single formatted email using Amazon SES. For more information, see the Amazon SES Developer Guide.
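With the new ListManagementOptions member above, a SendEmail call can tie an outgoing message to a contact list and topic so that unsubscribe handling applies to it. A sketch, assuming the existing sendEmail surface and simple-content shapes of SesV2Client; the addresses, subject, and names are placeholders:

import software.amazon.awssdk.services.sesv2.SesV2Client;
import software.amazon.awssdk.services.sesv2.model.Body;
import software.amazon.awssdk.services.sesv2.model.Content;
import software.amazon.awssdk.services.sesv2.model.Destination;
import software.amazon.awssdk.services.sesv2.model.EmailContent;
import software.amazon.awssdk.services.sesv2.model.ListManagementOptions;
import software.amazon.awssdk.services.sesv2.model.Message;
import software.amazon.awssdk.services.sesv2.model.SendEmailRequest;

public class SendNewsletterIssue {
    public static void main(String[] args) {
        try (SesV2Client ses = SesV2Client.create()) {
            ses.sendEmail(SendEmailRequest.builder()
                    .fromEmailAddress("news@example.com")
                    .destination(Destination.builder().toAddresses("recipient@example.com").build())
                    .content(EmailContent.builder()
                            .simple(Message.builder()
                                    .subject(Content.builder().data("October product updates").build())
                                    .body(Body.builder()
                                            .text(Content.builder().data("Hello from the product team.").build())
                                            .build())
                                    .build())
                            .build())
                    // Associates this message with the contact list and topic used for unsubscribe handling.
                    .listManagementOptions(ListManagementOptions.builder()
                            .contactListName("weekly-newsletter")
                            .topicName("product-updates")
                            .build())
                    .build());
        }
    }
}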

" @@ -4486,6 +5036,13 @@ "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.

" }, "Subject":{"type":"string"}, + "SubscriptionStatus":{ + "type":"string", + "enum":[ + "OPT_IN", + "OPT_OUT" + ] + }, "SuccessRedirectionURL":{ "type":"string", "documentation":"

The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

" @@ -4729,6 +5286,74 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Topic":{ + "type":"structure", + "required":[ + "TopicName", + "DisplayName", + "DefaultSubscriptionStatus" + ], + "members":{ + "TopicName":{ + "shape":"TopicName", + "documentation":"

The name of the topic.

" + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

The name of the topic the contact will see.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of what the topic is about, which the contact will see.

" + }, + "DefaultSubscriptionStatus":{ + "shape":"SubscriptionStatus", + "documentation":"

The default subscription status to be applied to a contact if the contact has not noted their preference for subscribing to a topic.

" + } + }, + "documentation":"

An interest group, theme, or label within a list. Lists can have multiple topics.

" + }, + "TopicFilter":{ + "type":"structure", + "members":{ + "TopicName":{ + "shape":"TopicName", + "documentation":"

The name of a topic on which you wish to apply the filter.

" + }, + "UseDefaultIfPreferenceUnavailable":{ + "shape":"UseDefaultIfPreferenceUnavailable", + "documentation":"

Indicates whether the default subscription status should be applied to a contact when the contact has not noted a preference for subscribing to a topic.

" + } + }, + "documentation":"

Used for filtering by a specific topic preference.

" + }, + "TopicName":{"type":"string"}, + "TopicPreference":{ + "type":"structure", + "required":[ + "TopicName", + "SubscriptionStatus" + ], + "members":{ + "TopicName":{ + "shape":"TopicName", + "documentation":"

The name of the topic.

" + }, + "SubscriptionStatus":{ + "shape":"SubscriptionStatus", + "documentation":"

The contact's subscription status for a topic, which is either OPT_IN or OPT_OUT.

" + } + }, + "documentation":"

The contact's preference for being opted-in to or opted-out of a topic.

" + }, + "TopicPreferenceList":{ + "type":"list", + "member":{"shape":"TopicPreference"} + }, + "Topics":{ + "type":"list", + "member":{"shape":"Topic"} + }, "TrackingOptions":{ "type":"structure", "required":["CustomRedirectDomain"], @@ -4740,6 +5365,7 @@ }, "documentation":"

An object that defines the tracking options for a configuration set. When you use the Amazon SES API v2 to send an email, it contains an invisible image that's used to track when recipients open your email. If your email contains links, those links are changed slightly in order to track when recipients click them.

These images and links include references to a domain operated by AWS. You can optionally configure the Amazon SES to use a domain that you operate for these images and links.

" }, + "UnsubscribeAll":{"type":"boolean"}, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -4799,6 +5425,69 @@ }, "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" }, + "UpdateContactListRequest":{ + "type":"structure", + "required":["ContactListName"], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

", + "location":"uri", + "locationName":"ContactListName" + }, + "Topics":{ + "shape":"Topics", + "documentation":"

An interest group, theme, or label within a list. A contact list can have multiple topics.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of what the contact list is about.

" + } + } + }, + "UpdateContactListResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateContactRequest":{ + "type":"structure", + "required":[ + "ContactListName", + "EmailAddress" + ], + "members":{ + "ContactListName":{ + "shape":"ContactListName", + "documentation":"

The name of the contact list.

", + "location":"uri", + "locationName":"ContactListName" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The contact's email address.

", + "location":"uri", + "locationName":"EmailAddress" + }, + "TopicPreferences":{ + "shape":"TopicPreferenceList", + "documentation":"

The contact's preference for being opted-in to or opted-out of a topic.

" + }, + "UnsubscribeAll":{ + "shape":"UnsubscribeAll", + "documentation":"

A boolean value indicating whether the contact is unsubscribed from all contact list topics.

" + }, + "AttributesData":{ + "shape":"AttributesData", + "documentation":"

The attribute data attached to a contact.

" + } + } + }, + "UpdateContactResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateCustomVerificationEmailTemplateRequest":{ "type":"structure", "required":[ @@ -4910,6 +5599,7 @@ "min":1, "sensitive":true }, + "UseDefaultIfPreferenceUnavailable":{"type":"boolean"}, "Volume":{ "type":"long", "documentation":"

An object that contains information about inbox placement volume.

" diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 051ea2cd8e5c..a1b64227fffc 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/sfn/src/main/resources/codegen-resources/service-2.json b/services/sfn/src/main/resources/codegen-resources/service-2.json index 0e42d909e7dd..cf4163c6c13e 100644 --- a/services/sfn/src/main/resources/codegen-resources/service-2.json +++ b/services/sfn/src/main/resources/codegen-resources/service-2.json @@ -286,6 +286,25 @@ "documentation":"

Starts a state machine execution.

StartExecution is idempotent. If StartExecution is called with the same name and input as a running execution, the call will succeed and return the same response as the original request. If the execution is closed or if the input is different, it will return a 400 ExecutionAlreadyExists error. Names can be reused after 90 days.

", "idempotent":true }, + "StartSyncExecution":{ + "name":"StartSyncExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartSyncExecutionInput"}, + "output":{"shape":"StartSyncExecutionOutput"}, + "errors":[ + {"shape":"InvalidArn"}, + {"shape":"InvalidExecutionInput"}, + {"shape":"InvalidName"}, + {"shape":"StateMachineDoesNotExist"}, + {"shape":"StateMachineDeleting"}, + {"shape":"StateMachineTypeNotSupported"} + ], + "documentation":"

Starts a Synchronous Express state machine execution.
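The new StartSyncExecution operation (note the "sync-" host prefix) blocks until the Express execution finishes and returns its status, output, and billing details directly. A minimal sketch against the generated SfnClient; the state machine ARN is a placeholder:

import software.amazon.awssdk.services.sfn.SfnClient;
import software.amazon.awssdk.services.sfn.model.StartSyncExecutionRequest;
import software.amazon.awssdk.services.sfn.model.StartSyncExecutionResponse;
import software.amazon.awssdk.services.sfn.model.SyncExecutionStatus;

public class RunExpressWorkflow {
    public static void main(String[] args) {
        try (SfnClient sfn = SfnClient.create()) {
            StartSyncExecutionResponse result = sfn.startSyncExecution(StartSyncExecutionRequest.builder()
                    .stateMachineArn("arn:aws:states:us-east-1:123456789012:stateMachine:example-express")
                    .input("{\"first_name\": \"test\"}")
                    .build());

            if (result.status() == SyncExecutionStatus.SUCCEEDED) {
                System.out.println("Output: " + result.output());
            } else {
                System.out.println("Failed: " + result.error() + " - " + result.cause());
            }
            // Billing details follow the BillingDetails shape defined below.
            System.out.println("Billed ms: " + result.billingDetails().billedDurationInMilliseconds());
        }
    }
}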

", + "endpoint":{"hostPrefix":"sync-"} + }, "StopExecution":{ "name":"StopExecution", "http":{ @@ -502,11 +521,33 @@ "max":256, "min":1 }, + "BilledDuration":{ + "type":"long", + "min":0 + }, + "BilledMemoryUsed":{ + "type":"long", + "min":0 + }, + "BillingDetails":{ + "type":"structure", + "members":{ + "billedMemoryUsedInMB":{ + "shape":"BilledMemoryUsed", + "documentation":"

Billed memory consumption of your workflow, in MB.

" + }, + "billedDurationInMilliseconds":{ + "shape":"BilledDuration", + "documentation":"

Billed duration of your workflow, in milliseconds.

" + } + }, + "documentation":"

An object that describes workflow billing details.

" + }, "CloudWatchEventsExecutionDataDetails":{ "type":"structure", "members":{ "included":{ - "shape":"included", + "shape":"includedDetails", "documentation":"

Indicates whether input or output was included in the response. Always true for API calls.

" } }, @@ -703,7 +744,7 @@ "members":{ "executionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that id entifies the execution.

" + "documentation":"

The Amazon Resource Name (ARN) that identifies the execution.

" }, "stateMachineArn":{ "shape":"Arn", @@ -737,7 +778,7 @@ "outputDetails":{"shape":"CloudWatchEventsExecutionDataDetails"}, "traceHeader":{ "shape":"TraceHeader", - "documentation":"

The AWS X-Ray trace header which was passed to the execution.

" + "documentation":"

The AWS X-Ray trace header that was passed to the execution.

" } } }, @@ -915,7 +956,7 @@ "members":{ "executionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that id entifies the execution.

" + "documentation":"

The Amazon Resource Name (ARN) that identifies the execution.

" }, "stateMachineArn":{ "shape":"Arn", @@ -1734,7 +1775,7 @@ "members":{ "executionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) that id entifies the execution.

" + "documentation":"

The Amazon Resource Name (ARN) that identifies the execution.

" }, "startDate":{ "shape":"Timestamp", @@ -1742,6 +1783,89 @@ } } }, + "StartSyncExecutionInput":{ + "type":"structure", + "required":["stateMachineArn"], + "members":{ + "stateMachineArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the state machine to execute.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the execution.

" + }, + "input":{ + "shape":"SensitiveData", + "documentation":"

The string that contains the JSON input data for the execution, for example:

\"input\": \"{\\\"first_name\\\" : \\\"test\\\"}\"

If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\"

Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

" + }, + "traceHeader":{ + "shape":"TraceHeader", + "documentation":"

Passes the AWS X-Ray trace header. The trace header can also be passed in the request payload.

" + } + } + }, + "StartSyncExecutionOutput":{ + "type":"structure", + "required":[ + "executionArn", + "startDate", + "stopDate", + "status" + ], + "members":{ + "executionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the execution.

" + }, + "stateMachineArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the state machine.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the execution.

" + }, + "startDate":{ + "shape":"Timestamp", + "documentation":"

The date the execution is started.

" + }, + "stopDate":{ + "shape":"Timestamp", + "documentation":"

If the execution has already ended, the date the execution stopped.

" + }, + "status":{ + "shape":"SyncExecutionStatus", + "documentation":"

The current status of the execution.

" + }, + "error":{ + "shape":"SensitiveError", + "documentation":"

The error code of the failure.

" + }, + "cause":{ + "shape":"SensitiveCause", + "documentation":"

A more detailed explanation of the cause of the failure.

" + }, + "input":{ + "shape":"SensitiveData", + "documentation":"

The string that contains the JSON input data of the execution. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

" + }, + "inputDetails":{"shape":"CloudWatchEventsExecutionDataDetails"}, + "output":{ + "shape":"SensitiveData", + "documentation":"

The JSON output data of the execution. Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding.

This field is set only if the execution succeeds. If the execution fails, this field is null.

" + }, + "outputDetails":{"shape":"CloudWatchEventsExecutionDataDetails"}, + "traceHeader":{ + "shape":"TraceHeader", + "documentation":"

The AWS X-Ray trace header that was passed to the execution.

" + }, + "billingDetails":{ + "shape":"BillingDetails", + "documentation":"

An object that describes workflow billing details, including billed duration and memory use.

" + } + } + }, "StateEnteredEventDetails":{ "type":"structure", "required":["name"], @@ -1894,6 +2018,14 @@ } } }, + "SyncExecutionStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "TIMED_OUT" + ] + }, "Tag":{ "type":"structure", "members":{ @@ -2268,7 +2400,7 @@ } } }, - "included":{"type":"boolean"}, + "includedDetails":{"type":"boolean"}, "truncated":{"type":"boolean"} }, "documentation":"AWS Step Functions

AWS Step Functions is a service that lets you coordinate the components of distributed applications and microservices using visual workflows.

You can use Step Functions to build applications from individual components, each of which performs a discrete function, or task, allowing you to scale and change applications quickly. Step Functions provides a console that helps visualize the components of your application as a series of steps. Step Functions automatically triggers and tracks each step, and retries steps when there are errors, so your application executes predictably and in the right order every time. Step Functions logs the state of each step, so you can quickly diagnose and debug any issues.

Step Functions manages operations and underlying infrastructure to ensure your application is available at any scale. You can run tasks on AWS, your own servers, or any system that has access to AWS. You can access and use Step Functions using the console, the AWS SDKs, or an HTTP API. For more information about Step Functions, see the AWS Step Functions Developer Guide .

" diff --git a/services/shield/pom.xml b/services/shield/pom.xml index 9ee14930e468..77f80e4298bd 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/shield/src/main/resources/codegen-resources/paginators-1.json b/services/shield/src/main/resources/codegen-resources/paginators-1.json index cffb14b68194..362ad5d7198b 100644 --- a/services/shield/src/main/resources/codegen-resources/paginators-1.json +++ b/services/shield/src/main/resources/codegen-resources/paginators-1.json @@ -6,11 +6,21 @@ "output_token": "NextToken", "result_key": "AttackSummaries" }, + "ListProtectionGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListProtections": { "input_token": "NextToken", "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "Protections" + }, + "ListResourcesInProtectionGroup": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" } } } \ No newline at end of file diff --git a/services/shield/src/main/resources/codegen-resources/service-2.json b/services/shield/src/main/resources/codegen-resources/service-2.json index a37e6df3f89e..b0cc654c6796 100644 --- a/services/shield/src/main/resources/codegen-resources/service-2.json +++ b/services/shield/src/main/resources/codegen-resources/service-2.json @@ -104,6 +104,24 @@ ], "documentation":"

Enables AWS Shield Advanced for a specific AWS resource. The resource can be an Amazon CloudFront distribution, Elastic Load Balancing load balancer, AWS Global Accelerator accelerator, Elastic IP Address, or an Amazon Route 53 hosted zone.

You can add protection to only a single resource with each CreateProtection request. If you want to add protection to multiple resources at once, use the AWS WAF console. For more information see Getting Started with AWS Shield Advanced and Add AWS Shield Advanced Protection to more AWS Resources.

" }, + "CreateProtectionGroup":{ + "name":"CreateProtectionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateProtectionGroupRequest"}, + "output":{"shape":"CreateProtectionGroupResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"OptimisticLockException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"LimitsExceededException"} + ], + "documentation":"

Creates a grouping of protected resources so they can be handled as a collective. This resource grouping improves the accuracy of detection and reduces false positives.
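A sketch of creating such a group with the generated ShieldClient. The required members (ProtectionGroupId, Aggregation, Pattern) appear in the CreateProtectionGroupRequest shape at the end of this model; the "SUM" aggregation and "ALL" pattern values used here are assumptions about the enum contents, which this diff does not show:

import software.amazon.awssdk.services.shield.ShieldClient;
import software.amazon.awssdk.services.shield.model.CreateProtectionGroupRequest;

public class CreateAccountWideProtectionGroup {
    public static void main(String[] args) {
        try (ShieldClient shield = ShieldClient.create()) {
            shield.createProtectionGroup(CreateProtectionGroupRequest.builder()
                    .protectionGroupId("all-protected-resources")
                    // Assumed enum values: how Shield combines data for the group ("SUM")
                    // and which resources are included ("ALL").
                    .aggregation("SUM")
                    .pattern("ALL")
                    .build());
        }
    }
}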

" + }, "CreateSubscription":{ "name":"CreateSubscription", "http":{ @@ -133,6 +151,21 @@ ], "documentation":"

Deletes an AWS Shield Advanced Protection.

" }, + "DeleteProtectionGroup":{ + "name":"DeleteProtectionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteProtectionGroupRequest"}, + "output":{"shape":"DeleteProtectionGroupResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"OptimisticLockException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the specified protection group.

" + }, "DeleteSubscription":{ "name":"DeleteSubscription", "http":{ @@ -163,6 +196,19 @@ ], "documentation":"

Describes the details of a DDoS attack.

" }, + "DescribeAttackStatistics":{ + "name":"DescribeAttackStatistics", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAttackStatisticsRequest"}, + "output":{"shape":"DescribeAttackStatisticsResponse"}, + "errors":[ + {"shape":"InternalErrorException"} + ], + "documentation":"

Provides information about the number and type of attacks AWS Shield has detected in the last year for all resources that belong to your account, regardless of whether you've defined Shield protections for them. This operation is available to Shield customers as well as to Shield Advanced customers.

The operation returns data for the time range of midnight UTC, one year ago, to midnight UTC, today. For example, if the current time is 2020-10-26 15:39:32 PDT, equal to 2020-10-26 22:39:32 UTC, then the time range for the attack data returned is from 2019-10-26 00:00:00 UTC to 2020-10-26 00:00:00 UTC.

The time range indicates the period covered by the attack statistics data items.
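A sketch of reading those statistics, assuming the generated describeAttackStatistics method and that the response exposes the time range and the AttackStatisticsDataList defined later in this model as timeRange() and dataItems(); the response shape itself is not shown in this diff, so those accessor names are assumptions:

import software.amazon.awssdk.services.shield.ShieldClient;
import software.amazon.awssdk.services.shield.model.AttackStatisticsDataItem;
import software.amazon.awssdk.services.shield.model.DescribeAttackStatisticsRequest;
import software.amazon.awssdk.services.shield.model.DescribeAttackStatisticsResponse;

public class PrintAttackStatistics {
    public static void main(String[] args) {
        try (ShieldClient shield = ShieldClient.create()) {
            DescribeAttackStatisticsResponse stats =
                    shield.describeAttackStatistics(DescribeAttackStatisticsRequest.builder().build());
            System.out.println("Time range: " + stats.timeRange());
            for (AttackStatisticsDataItem item : stats.dataItems()) {
                // AttackCount is always present; AttackVolume may be empty when the count is zero.
                System.out.println(item.attackCount() + " attacks, volume: " + item.attackVolume());
            }
        }
    }
}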

" + }, "DescribeDRTAccess":{ "name":"DescribeDRTAccess", "http":{ @@ -206,6 +252,20 @@ ], "documentation":"

Lists the details of a Protection object.

" }, + "DescribeProtectionGroup":{ + "name":"DescribeProtectionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeProtectionGroupRequest"}, + "output":{"shape":"DescribeProtectionGroupResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the specification for the specified protection group.

" + }, "DescribeSubscription":{ "name":"DescribeSubscription", "http":{ @@ -332,6 +392,21 @@ ], "documentation":"

Returns all ongoing DDoS attacks or all DDoS attacks during a specified time period.

" }, + "ListProtectionGroups":{ + "name":"ListProtectionGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListProtectionGroupsRequest"}, + "output":{"shape":"ListProtectionGroupsResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidPaginationTokenException"} + ], + "documentation":"

Retrieves the ProtectionGroup objects for the account.

" + }, "ListProtections":{ "name":"ListProtections", "http":{ @@ -347,6 +422,21 @@ ], "documentation":"

Lists all Protection objects for the account.

" }, + "ListResourcesInProtectionGroup":{ + "name":"ListResourcesInProtectionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourcesInProtectionGroupRequest"}, + "output":{"shape":"ListResourcesInProtectionGroupResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidPaginationTokenException"} + ], + "documentation":"

Retrieves the resources that are included in the protection group.

" + }, "UpdateEmergencyContactSettings":{ "name":"UpdateEmergencyContactSettings", "http":{ @@ -363,6 +453,22 @@ ], "documentation":"

Updates the details of the list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" }, + "UpdateProtectionGroup":{ + "name":"UpdateProtectionGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateProtectionGroupRequest"}, + "output":{"shape":"UpdateProtectionGroupResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OptimisticLockException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Updates an existing protection group. A protection group is a grouping of protected resources so they can be handled as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" + }, "UpdateSubscription":{ "name":"UpdateSubscription", "http":{ @@ -533,7 +639,7 @@ }, "TopContributors":{ "shape":"TopContributors", - "documentation":"

The array of Contributor objects that includes the top five contributors to an attack.

" + "documentation":"

The array of contributor objects that includes the top five contributors to an attack.

" }, "Unit":{ "shape":"Unit", @@ -559,6 +665,25 @@ "WORDPRESS_PINGBACK_SOURCE" ] }, + "AttackStatisticsDataItem":{ + "type":"structure", + "required":["AttackCount"], + "members":{ + "AttackVolume":{ + "shape":"AttackVolume", + "documentation":"

Information about the volume of attacks during the time period. If the accompanying AttackCount is zero, this setting might be empty.

" + }, + "AttackCount":{ + "shape":"Long", + "documentation":"

The number of attacks detected during the time period. This is always present, but might be zero.

" + } + }, + "documentation":"

A single attack statistics data record. This is returned by DescribeAttackStatistics along with a time range indicating the time period that the attack statistics apply to.

" + }, + "AttackStatisticsDataList":{ + "type":"list", + "member":{"shape":"AttackStatisticsDataItem"} + }, "AttackSummaries":{ "type":"list", "member":{"shape":"AttackSummary"} @@ -605,6 +730,35 @@ "type":"list", "member":{"shape":"AttackVectorDescription"} }, + "AttackVolume":{ + "type":"structure", + "members":{ + "BitsPerSecond":{ + "shape":"AttackVolumeStatistics", + "documentation":"

A statistics object that uses bits per second as the unit. This is included for network level attacks.

" + }, + "PacketsPerSecond":{ + "shape":"AttackVolumeStatistics", + "documentation":"

A statistics object that uses packets per second as the unit. This is included for network level attacks.

" + }, + "RequestsPerSecond":{ + "shape":"AttackVolumeStatistics", + "documentation":"

A statistics object that uses requests per second as the unit. This is included for application level attacks, and is only available for accounts that are subscribed to Shield Advanced.

" + } + }, + "documentation":"

Information about the volume of attacks during the time period, included in an AttackStatisticsDataItem. If the accompanying AttackCount in the statistics object is zero, this setting might be empty.

" + }, + "AttackVolumeStatistics":{ + "type":"structure", + "required":["Max"], + "members":{ + "Max":{ + "shape":"Double", + "documentation":"

The maximum attack volume observed for the given unit.

" + } + }, + "documentation":"

Statistics objects for the various data types in AttackVolume.

" + }, "AutoRenew":{ "type":"string", "enum":[ @@ -632,6 +786,41 @@ }, "documentation":"

A contributor to the attack and their contribution.

" }, + "CreateProtectionGroupRequest":{ + "type":"structure", + "required":[ + "ProtectionGroupId", + "Aggregation", + "Pattern" + ], + "members":{ + "ProtectionGroupId":{ + "shape":"ProtectionGroupId", + "documentation":"

The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.

" + }, + "Aggregation":{ + "shape":"ProtectionGroupAggregation", + "documentation":"

Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include CloudFront distributions and origin resources for CloudFront distributions.

" + }, + "Pattern":{ + "shape":"ProtectionGroupPattern", + "documentation":"

The criteria to use to choose the protected resources for inclusion in the group. You can include all resources that have protections, provide a list of resource Amazon Resource Names (ARNs), or include all resources of a specified resource type.

" + }, + "ResourceType":{ + "shape":"ProtectedResourceType", + "documentation":"

The resource type to include in the protection group. All protected resources of this type are included in the protection group. Newly protected resources of this type are automatically added to the group. You must set this when you set Pattern to BY_RESOURCE_TYPE and you must not set it for any other Pattern setting.

" + }, + "Members":{ + "shape":"ProtectionGroupMembers", + "documentation":"

The Amazon Resource Names (ARNs) of the resources to include in the protection group. You must set this when you set Pattern to ARBITRARY and you must not set it for any other Pattern setting.
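As a rough illustration of how these fields fit together, the sketch below creates a protection group over all protected CloudFront distributions through the generated Shield client implied by this model (the CreateProtectionGroup operation itself is defined elsewhere in the file). The group name, region, and aggregation choice are illustrative only.

// Sketch: BY_RESOURCE_TYPE requires ResourceType and must not set Members;
// an ARBITRARY pattern would instead require the Members ARN list.
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.shield.ShieldClient;
import software.amazon.awssdk.services.shield.model.CreateProtectionGroupRequest;
import software.amazon.awssdk.services.shield.model.ProtectedResourceType;
import software.amazon.awssdk.services.shield.model.ProtectionGroupAggregation;
import software.amazon.awssdk.services.shield.model.ProtectionGroupPattern;

public class CreateProtectionGroupSketch {
    public static void main(String[] args) {
        try (ShieldClient shield = ShieldClient.builder().region(Region.US_EAST_1).build()) {
            shield.createProtectionGroup(CreateProtectionGroupRequest.builder()
                    .protectionGroupId("cloudfront-distributions")        // placeholder name
                    .aggregation(ProtectionGroupAggregation.MAX)          // per the guidance above for CloudFront
                    .pattern(ProtectionGroupPattern.BY_RESOURCE_TYPE)
                    .resourceType(ProtectedResourceType.CLOUDFRONT_DISTRIBUTION)
                    .build());
        }
    }
}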

" + } + } + }, + "CreateProtectionGroupResponse":{ + "type":"structure", + "members":{ + } + }, "CreateProtectionRequest":{ "type":"structure", "required":[ @@ -668,6 +857,21 @@ "members":{ } }, + "DeleteProtectionGroupRequest":{ + "type":"structure", + "required":["ProtectionGroupId"], + "members":{ + "ProtectionGroupId":{ + "shape":"ProtectionGroupId", + "documentation":"

The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.

" + } + } + }, + "DeleteProtectionGroupResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteProtectionRequest":{ "type":"structure", "required":["ProtectionId"], @@ -714,6 +918,25 @@ } } }, + "DescribeAttackStatisticsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeAttackStatisticsResponse":{ + "type":"structure", + "required":[ + "TimeRange", + "DataItems" + ], + "members":{ + "TimeRange":{"shape":"TimeRange"}, + "DataItems":{ + "shape":"AttackStatisticsDataList", + "documentation":"

The data that describes the attacks detected during the time period.

" + } + } + }, "DescribeDRTAccessRequest":{ "type":"structure", "members":{ @@ -746,6 +969,26 @@ } } }, + "DescribeProtectionGroupRequest":{ + "type":"structure", + "required":["ProtectionGroupId"], + "members":{ + "ProtectionGroupId":{ + "shape":"ProtectionGroupId", + "documentation":"

The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.

" + } + } + }, + "DescribeProtectionGroupResponse":{ + "type":"structure", + "required":["ProtectionGroup"], + "members":{ + "ProtectionGroup":{ + "shape":"ProtectionGroup", + "documentation":"

A grouping of protected resources that you and AWS Shield Advanced can monitor as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" + } + } + }, "DescribeProtectionRequest":{ "type":"structure", "members":{ @@ -940,9 +1183,17 @@ "InvalidParameterException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{"shape":"errorMessage"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

Additional information about the exception.

" + }, + "fields":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

Fields that caused the exception.

" + } }, - "documentation":"

Exception that indicates that the parameters passed to the API are invalid.

", + "documentation":"

Exception that indicates that the parameters passed to the API are invalid. If available, this exception includes details in additional properties.

", "exception":true }, "InvalidResourceException":{ @@ -1004,7 +1255,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of AttackSummary objects to be returned. If this is left blank, the first 20 results will be returned.

This is a maximum value; it is possible that AWS WAF will return the results in smaller batches. That is, the number of AttackSummary objects returned could be less than MaxResults, even if there are still more AttackSummary objects yet to return. If there are more AttackSummary objects to return, AWS WAF will always also return a NextToken.

" + "documentation":"

The maximum number of AttackSummary objects to return. If you leave this blank, Shield Advanced returns the first 20 results.

This is a maximum value. Shield Advanced might return the results in smaller batches. That is, the number of objects returned could be less than MaxResults, even if there are still more objects yet to return. If there are more objects to return, Shield Advanced returns a value in NextToken that you can use in your next request, to get the next batch of objects.

" } } }, @@ -1017,7 +1268,34 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

The token returned by a previous call to indicate that there is more data available. If not null, more results are available. Pass this value for the NextMarker parameter in a subsequent call to ListAttacks to retrieve the next set of items.

AWS WAF might return the list of AttackSummary objects in batches smaller than the number specified by MaxResults. If there are more AttackSummary objects to return, AWS WAF will always also return a NextToken.

" + "documentation":"

The token returned by a previous call to indicate that there is more data available. If not null, more results are available. Pass this value for the NextMarker parameter in a subsequent call to ListAttacks to retrieve the next set of items.

Shield Advanced might return the list of AttackSummary objects in batches smaller than the number specified by MaxResults. If there are more attack summary objects to return, Shield Advanced will always also return a NextToken.

" + } + } + }, + "ListProtectionGroupsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

The next token value from a previous call to ListProtectionGroups. Pass null if this is the first call.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of ProtectionGroup objects to return. If you leave this blank, Shield Advanced returns the first 20 results.

This is a maximum value. Shield Advanced might return the results in smaller batches. That is, the number of objects returned could be less than MaxResults, even if there are still more objects yet to return. If there are more objects to return, Shield Advanced returns a value in NextToken that you can use in your next request, to get the next batch of objects.

" + } + } + }, + "ListProtectionGroupsResponse":{ + "type":"structure", + "required":["ProtectionGroups"], + "members":{ + "ProtectionGroups":{ + "shape":"ProtectionGroups", + "documentation":"

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If you specify a value for MaxResults and you have more protection groups than the value of MaxResults, AWS Shield Advanced returns this token that you can use in your next request, to get the next batch of objects.
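The MaxResults/NextToken contract described above maps onto the usual manual pagination loop in the generated Java client; a sketch follows, with the page size and region as illustrative choices rather than values taken from the model.

// Sketch: keep calling ListProtectionGroups until NextToken comes back null.
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.shield.ShieldClient;
import software.amazon.awssdk.services.shield.model.ListProtectionGroupsRequest;
import software.amazon.awssdk.services.shield.model.ListProtectionGroupsResponse;
import software.amazon.awssdk.services.shield.model.ProtectionGroup;

public class ListProtectionGroupsSketch {
    public static void main(String[] args) {
        try (ShieldClient shield = ShieldClient.builder().region(Region.US_EAST_1).build()) {
            String nextToken = null;
            do {
                ListProtectionGroupsResponse page = shield.listProtectionGroups(
                        ListProtectionGroupsRequest.builder()
                                .maxResults(20)        // upper bound only; batches may be smaller
                                .nextToken(nextToken)  // null on the first call
                                .build());
                for (ProtectionGroup group : page.protectionGroups()) {
                    System.out.println(group.protectionGroupId() + " (" + group.pattern() + ")");
                }
                nextToken = page.nextToken();          // null once all groups have been returned
            } while (nextToken != null);
        }
    }
}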

" } } }, @@ -1030,7 +1308,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of Protection objects to be returned. If this is left blank the first 20 results will be returned.

This is a maximum value; it is possible that AWS WAF will return the results in smaller batches. That is, the number of Protection objects returned could be less than MaxResults, even if there are still more Protection objects yet to return. If there are more Protection objects to return, AWS WAF will always also return a NextToken.

" + "documentation":"

The maximum number of Protection objects to return. If you leave this blank, Shield Advanced returns the first 20 results.

This is a maximum value. Shield Advanced might return the results in smaller batches. That is, the number of objects returned could be less than MaxResults, even if there are still more objects yet to return. If there are more objects to return, Shield Advanced returns a value in NextToken that you can use in your next request, to get the next batch of objects.

" } } }, @@ -1043,7 +1321,39 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

If you specify a value for MaxResults and you have more Protections than the value of MaxResults, AWS Shield Advanced returns a NextToken value in the response that allows you to list another group of Protections. For the second and subsequent ListProtections requests, specify the value of NextToken from the previous response to get information about another batch of Protections.

AWS WAF might return the list of Protection objects in batches smaller than the number specified by MaxResults. If there are more Protection objects to return, AWS WAF will always also return a NextToken.

" + "documentation":"

If you specify a value for MaxResults and you have more Protections than the value of MaxResults, AWS Shield Advanced returns a NextToken value in the response that allows you to list another group of Protections. For the second and subsequent ListProtections requests, specify the value of NextToken from the previous response to get information about another batch of Protections.

Shield Advanced might return the list of Protection objects in batches smaller than the number specified by MaxResults. If there are more Protection objects to return, Shield Advanced will always also return a NextToken.

" + } + } + }, + "ListResourcesInProtectionGroupRequest":{ + "type":"structure", + "required":["ProtectionGroupId"], + "members":{ + "ProtectionGroupId":{ + "shape":"ProtectionGroupId", + "documentation":"

The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

The next token value from a previous call to ListResourcesInProtectionGroup. Pass null if this is the first call.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of resource ARN objects to return. If you leave this blank, Shield Advanced returns the first 20 results.

This is a maximum value. Shield Advanced might return the results in smaller batches. That is, the number of objects returned could be less than MaxResults, even if there are still more objects yet to return. If there are more objects to return, Shield Advanced returns a value in NextToken that you can use in your next request, to get the next batch of objects.

" + } + } + }, + "ListResourcesInProtectionGroupResponse":{ + "type":"structure", + "required":["ResourceArns"], + "members":{ + "ResourceArns":{ + "shape":"ResourceArnList", + "documentation":"

The Amazon Resource Names (ARNs) of the resources that are included in the protection group.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If you specify a value for MaxResults and you have more resources in the protection group than the value of MaxResults, AWS Shield Advanced returns this token that you can use in your next request, to get the next batch of objects.

" } } }, @@ -1118,6 +1428,17 @@ "PENDING" ] }, + "ProtectedResourceType":{ + "type":"string", + "enum":[ + "CLOUDFRONT_DISTRIBUTION", + "ROUTE_53_HOSTED_ZONE", + "ELASTIC_IP_ALLOCATION", + "CLASSIC_LOAD_BALANCER", + "APPLICATION_LOAD_BALANCER", + "GLOBAL_ACCELERATOR" + ] + }, "Protection":{ "type":"structure", "members":{ @@ -1127,7 +1448,7 @@ }, "Name":{ "shape":"ProtectionName", - "documentation":"

The friendly name of the protection. For example, My CloudFront distributions.

" + "documentation":"

The name of the protection. For example, My CloudFront distributions.

" }, "ResourceArn":{ "shape":"ResourceArn", @@ -1140,12 +1461,127 @@ }, "documentation":"

An object that represents a resource that is under DDoS protection.

" }, + "ProtectionGroup":{ + "type":"structure", + "required":[ + "ProtectionGroupId", + "Aggregation", + "Pattern", + "Members" + ], + "members":{ + "ProtectionGroupId":{ + "shape":"ProtectionGroupId", + "documentation":"

The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.

" + }, + "Aggregation":{ + "shape":"ProtectionGroupAggregation", + "documentation":"

Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include CloudFront distributions and origin resources for CloudFront distributions.

" + }, + "Pattern":{ + "shape":"ProtectionGroupPattern", + "documentation":"

The criteria to use to choose the protected resources for inclusion in the group. You can include all resources that have protections, provide a list of resource Amazon Resource Names (ARNs), or include all resources of a specified resource type.

" + }, + "ResourceType":{ + "shape":"ProtectedResourceType", + "documentation":"

The resource type to include in the protection group. All protected resources of this type are included in the protection group. You must set this when you set Pattern to BY_RESOURCE_TYPE and you must not set it for any other Pattern setting.

" + }, + "Members":{ + "shape":"ProtectionGroupMembers", + "documentation":"

The Amazon Resource Names (ARNs) of the resources to include in the protection group. You must set this when you set Pattern to ARBITRARY and you must not set it for any other Pattern setting.

" + } + }, + "documentation":"

A grouping of protected resources that you and AWS Shield Advanced can monitor as a collective. This resource grouping improves the accuracy of detection and reduces false positives.

" + }, + "ProtectionGroupAggregation":{ + "type":"string", + "enum":[ + "SUM", + "MEAN", + "MAX" + ] + }, + "ProtectionGroupArbitraryPatternLimits":{ + "type":"structure", + "required":["MaxMembers"], + "members":{ + "MaxMembers":{ + "shape":"Long", + "documentation":"

The maximum number of resources you can specify for a single arbitrary pattern in a protection group.

" + } + }, + "documentation":"

Limits settings on protection groups with arbitrary pattern type.

" + }, + "ProtectionGroupId":{ + "type":"string", + "max":36, + "min":1, + "pattern":"[a-zA-Z0-9\\\\-]*" + }, + "ProtectionGroupLimits":{ + "type":"structure", + "required":[ + "MaxProtectionGroups", + "PatternTypeLimits" + ], + "members":{ + "MaxProtectionGroups":{ + "shape":"Long", + "documentation":"

The maximum number of protection groups that you can have at one time.

" + }, + "PatternTypeLimits":{ + "shape":"ProtectionGroupPatternTypeLimits", + "documentation":"

Limits settings by pattern type in the protection groups for your subscription.

" + } + }, + "documentation":"

Limits settings on protection groups for your subscription.

" + }, + "ProtectionGroupMembers":{ + "type":"list", + "member":{"shape":"ResourceArn"}, + "max":10000, + "min":0 + }, + "ProtectionGroupPattern":{ + "type":"string", + "enum":[ + "ALL", + "ARBITRARY", + "BY_RESOURCE_TYPE" + ] + }, + "ProtectionGroupPatternTypeLimits":{ + "type":"structure", + "required":["ArbitraryPatternLimits"], + "members":{ + "ArbitraryPatternLimits":{ + "shape":"ProtectionGroupArbitraryPatternLimits", + "documentation":"

Limits settings on protection groups with arbitrary pattern type.

" + } + }, + "documentation":"

Limits settings by pattern type in the protection groups for your subscription.

" + }, + "ProtectionGroups":{ + "type":"list", + "member":{"shape":"ProtectionGroup"} + }, "ProtectionId":{ "type":"string", "max":36, "min":1, "pattern":"[a-zA-Z0-9\\\\-]*" }, + "ProtectionLimits":{ + "type":"structure", + "required":["ProtectedResourceTypeLimits"], + "members":{ + "ProtectedResourceTypeLimits":{ + "shape":"Limits", + "documentation":"

The maximum number of resource types that you can specify in a protection.

" + } + }, + "documentation":"

Limits settings on protections for your subscription.

" + }, "ProtectionName":{ "type":"string", "max":128, @@ -1159,9 +1595,13 @@ "ResourceAlreadyExistsException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{"shape":"errorMessage"}, + "resourceType":{ + "shape":"String", + "documentation":"

The type of resource that already exists.

" + } }, - "documentation":"

Exception indicating the specified resource already exists.

", + "documentation":"

Exception indicating the specified resource already exists. If available, this exception includes details in additional properties.

", "exception":true }, "ResourceArn":{ @@ -1174,12 +1614,20 @@ "type":"list", "member":{"shape":"ResourceArn"} }, + "ResourceArnList":{ + "type":"list", + "member":{"shape":"ResourceArn"} + }, "ResourceNotFoundException":{ "type":"structure", "members":{ - "message":{"shape":"errorMessage"} + "message":{"shape":"errorMessage"}, + "resourceType":{ + "shape":"String", + "documentation":"

Type of resource.

" + } }, - "documentation":"

Exception indicating the specified resource does not exist.

", + "documentation":"

Exception indicating the specified resource does not exist. If available, this exception includes details in additional properties.

", "exception":true }, "RoleArn":{ @@ -1224,6 +1672,7 @@ }, "Subscription":{ "type":"structure", + "required":["SubscriptionLimits"], "members":{ "StartTime":{ "shape":"Timestamp", @@ -1248,10 +1697,32 @@ "ProactiveEngagementStatus":{ "shape":"ProactiveEngagementStatus", "documentation":"

If ENABLED, the DDoS Response Team (DRT) will use email and phone to notify contacts about escalations to the DRT and to initiate proactive customer support.

If PENDING, you have requested proactive engagement and the request is pending. The status changes to ENABLED when your request is fully processed.

If DISABLED, the DRT will not proactively notify contacts about escalations or to initiate proactive customer support.

" + }, + "SubscriptionLimits":{ + "shape":"SubscriptionLimits", + "documentation":"

Limits settings for your subscription.

" } }, "documentation":"

Information about the AWS Shield Advanced subscription for an account.

" }, + "SubscriptionLimits":{ + "type":"structure", + "required":[ + "ProtectionLimits", + "ProtectionGroupLimits" + ], + "members":{ + "ProtectionLimits":{ + "shape":"ProtectionLimits", + "documentation":"

Limits settings on protections for your subscription.

" + }, + "ProtectionGroupLimits":{ + "shape":"ProtectionGroupLimits", + "documentation":"

Limits settings on protection groups for your subscription.

" + } + }, + "documentation":"

Limits settings for your subscription.

" + }, "SubscriptionState":{ "type":"string", "enum":[ @@ -1324,7 +1795,7 @@ "documentation":"

The end time, in Unix time in seconds. For more information see timestamp.

" } }, - "documentation":"

The time range.

" + "documentation":"

The time range.

" }, "Timestamp":{"type":"timestamp"}, "Token":{ @@ -1360,6 +1831,41 @@ "members":{ } }, + "UpdateProtectionGroupRequest":{ + "type":"structure", + "required":[ + "ProtectionGroupId", + "Aggregation", + "Pattern" + ], + "members":{ + "ProtectionGroupId":{ + "shape":"ProtectionGroupId", + "documentation":"

The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.

" + }, + "Aggregation":{ + "shape":"ProtectionGroupAggregation", + "documentation":"

Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.

  • Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.

  • Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.

  • Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include CloudFront distributions and origin resources for CloudFront distributions.

" + }, + "Pattern":{ + "shape":"ProtectionGroupPattern", + "documentation":"

The criteria to use to choose the protected resources for inclusion in the group. You can include all resources that have protections, provide a list of resource Amazon Resource Names (ARNs), or include all resources of a specified resource type.

" + }, + "ResourceType":{ + "shape":"ProtectedResourceType", + "documentation":"

The resource type to include in the protection group. All protected resources of this type are included in the protection group. You must set this when you set Pattern to BY_RESOURCE_TYPE and you must not set it for any other Pattern setting.

" + }, + "Members":{ + "shape":"ProtectionGroupMembers", + "documentation":"

The Amazon Resource Names (ARNs) of the resources to include in the protection group. You must set this when you set Pattern to ARBITRARY and you must not set it for any other Pattern setting.

" + } + } + }, + "UpdateProtectionGroupResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateSubscriptionRequest":{ "type":"structure", "members":{ @@ -1374,6 +1880,35 @@ "members":{ } }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the parameter that failed validation.

" + }, + "message":{ + "shape":"String", + "documentation":"

The message describing why the parameter failed validation.

" + } + }, + "documentation":"

Provides information about a particular parameter passed inside a request that resulted in an exception.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "FIELD_VALIDATION_FAILED", + "OTHER" + ] + }, "errorMessage":{"type":"string"} }, "documentation":"AWS Shield Advanced

This is the AWS Shield Advanced API Reference. This guide is for developers who need detailed information about the AWS Shield Advanced API actions, data types, and errors. For detailed information about AWS WAF and AWS Shield Advanced features and an overview of how to use the AWS WAF and AWS Shield Advanced APIs, see the AWS WAF and AWS Shield Developer Guide.

" diff --git a/services/signer/pom.xml b/services/signer/pom.xml index e26e5b3b4959..24c36beb0a33 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/signer/src/main/resources/codegen-resources/service-2.json b/services/signer/src/main/resources/codegen-resources/service-2.json index c72da71cf41a..4902d01f6af4 100644 --- a/services/signer/src/main/resources/codegen-resources/service-2.json +++ b/services/signer/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,25 @@ "uid":"signer-2017-08-25" }, "operations":{ + "AddProfilePermission":{ + "name":"AddProfilePermission", + "http":{ + "method":"POST", + "requestUri":"/signing-profiles/{profileName}/permissions" + }, + "input":{"shape":"AddProfilePermissionRequest"}, + "output":{"shape":"AddProfilePermissionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceLimitExceededException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Adds cross-account permissions to a signing profile.

" + }, "CancelSigningProfile":{ "name":"CancelSigningProfile", "http":{ @@ -23,7 +42,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Changes the state of an ACTIVE signing profile to CANCELED. A canceled profile is still viewable with the ListSigningProfiles operation, but it cannot perform new signing jobs, and is deleted two years after cancelation.

" @@ -39,6 +58,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Returns information about a specific code signing job. You specify the job by using the jobId value that is returned by the StartSigningJob operation.

" @@ -54,6 +74,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Returns information on a specific signing platform.

" @@ -69,11 +90,28 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Returns information on a specific signing profile.

" }, + "ListProfilePermissions":{ + "name":"ListProfilePermissions", + "http":{ + "method":"GET", + "requestUri":"/signing-profiles/{profileName}/permissions" + }, + "input":{"shape":"ListProfilePermissionsRequest"}, + "output":{"shape":"ListProfilePermissionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Lists the cross-account permissions associated with a signing profile.

" + }, "ListSigningJobs":{ "name":"ListSigningJobs", "http":{ @@ -85,7 +123,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Lists all your signing jobs. You can use the maxResults parameter to limit the number of signing jobs that are returned in the response. If additional jobs remain to be listed, code signing returns a nextToken value. Use this value in subsequent calls to ListSigningJobs to fetch the remaining values. You can continue calling ListSigningJobs with your maxResults parameter and with new values that code signing returns in the nextToken parameter until all of your signing jobs have been returned.

" @@ -101,7 +139,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Lists all signing platforms available in code signing that match the request parameters. If additional platforms remain to be listed, code signing returns a nextToken value. Use this value in subsequent calls to ListSigningPlatforms to fetch the remaining values. You can continue calling ListSigningPlatforms with your maxResults parameter and with new values that code signing returns in the nextToken parameter until all of your signing platforms have been returned.

" @@ -116,7 +154,7 @@ "output":{"shape":"ListSigningProfilesResponse"}, "errors":[ {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Lists all available signing profiles in your AWS account. Returns only profiles with an ACTIVE status unless the includeCanceled request field is set to true. If additional profiles remain to be listed, code signing returns a nextToken value. Use this value in subsequent calls to ListSigningProfiles to fetch the remaining values. You can continue calling ListSigningProfiles with your maxResults parameter and with new values that code signing returns in the nextToken parameter until all of your signing profiles have been returned.

" @@ -132,7 +170,8 @@ "errors":[ {"shape":"InternalServiceErrorException"}, {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} ], "documentation":"

Returns a list of the tags associated with a signing profile resource.

" }, @@ -148,11 +187,61 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Creates a signing profile. A signing profile is a code signing template that can be used to carry out a pre-defined signing job. For more information, see http://docs.aws.amazon.com/signer/latest/developerguide/gs-profile.html

" }, + "RemoveProfilePermission":{ + "name":"RemoveProfilePermission", + "http":{ + "method":"DELETE", + "requestUri":"/signing-profiles/{profileName}/permissions/{statementId}" + }, + "input":{"shape":"RemoveProfilePermissionRequest"}, + "output":{"shape":"RemoveProfilePermissionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Removes cross-account permissions from a signing profile.

" + }, + "RevokeSignature":{ + "name":"RevokeSignature", + "http":{ + "method":"PUT", + "requestUri":"/signing-jobs/{jobId}/revoke" + }, + "input":{"shape":"RevokeSignatureRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Changes the state of a signing job to REVOKED. This indicates that the signature is no longer valid.

" + }, + "RevokeSigningProfile":{ + "name":"RevokeSigningProfile", + "http":{ + "method":"PUT", + "requestUri":"/signing-profiles/{profileName}/revoke" + }, + "input":{"shape":"RevokeSigningProfileRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServiceErrorException"} + ], + "documentation":"

Changes the state of a signing profile to REVOKED. This indicates that signatures generated using the signing profile after an effective start date are no longer valid.

" + }, "StartSigningJob":{ "name":"StartSigningJob", "http":{ @@ -166,6 +255,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, + {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], "documentation":"

Initiates a signing job to be performed on the code provided. Signing jobs are viewable by the ListSigningJobs operation for two years after they are performed. Note the following requirements:

  • You must create an Amazon S3 source bucket. For more information, see Create a Bucket in the Amazon S3 Getting Started Guide.

  • Your S3 source bucket must be version enabled.

  • You must create an S3 destination bucket. Code signing uses your S3 destination bucket to write your signed code.

  • You specify the name of the source and destination buckets when calling the StartSigningJob operation.

  • You must also specify a request token that identifies your request to code signing.

You can call the DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob.

For a Java example that shows how to use this action, see http://docs.aws.amazon.com/acm/latest/userguide/
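Pending that guide, here is a hedged sketch of the requirements listed above using the generated SignerClient. The bucket names, object key and version, profile name, and the S3Source/S3Destination field names (bucketName, key, version and bucketName, prefix) are assumptions, not values taken from the excerpt shown here.

// Sketch: start a signing job against a version-enabled source bucket and
// write the signed artifact to a destination bucket, then record the job ID.
import java.util.UUID;
import software.amazon.awssdk.services.signer.SignerClient;
import software.amazon.awssdk.services.signer.model.Destination;
import software.amazon.awssdk.services.signer.model.S3Destination;
import software.amazon.awssdk.services.signer.model.S3Source;
import software.amazon.awssdk.services.signer.model.Source;
import software.amazon.awssdk.services.signer.model.StartSigningJobRequest;
import software.amazon.awssdk.services.signer.model.StartSigningJobResponse;

public class StartSigningJobSketch {
    public static void main(String[] args) {
        try (SignerClient signer = SignerClient.create()) {
            StartSigningJobResponse response = signer.startSigningJob(StartSigningJobRequest.builder()
                    .profileName("my_signing_profile")                  // profileName is now required
                    .source(Source.builder()
                            .s3(S3Source.builder()
                                    .bucketName("my-source-bucket")     // must be version enabled
                                    .key("code/unsigned.zip")
                                    .version("exampleObjectVersionId")
                                    .build())
                            .build())
                    .destination(Destination.builder()
                            .s3(S3Destination.builder()
                                    .bucketName("my-destination-bucket")
                                    .prefix("signed/")
                                    .build())
                            .build())
                    .clientRequestToken(UUID.randomUUID().toString())   // idempotency token
                    .build());
            System.out.println("Started signing job " + response.jobId());
        }
    }
}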

" @@ -181,7 +271,8 @@ "errors":[ {"shape":"InternalServiceErrorException"}, {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} ], "documentation":"

Adds one or more tags to a signing profile. Tags are labels that you can use to identify and organize your AWS resources. Each tag consists of a key and an optional value. To specify the signing profile, use its Amazon Resource Name (ARN). To specify the tag, use a key-value pair.

" }, @@ -196,26 +287,85 @@ "errors":[ {"shape":"InternalServiceErrorException"}, {"shape":"BadRequestException"}, - {"shape":"NotFoundException"} + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} ], "documentation":"

Removes one or more tags from a signing profile. To remove the tags, specify a list of tag keys.

" } }, "shapes":{ - "key":{"type":"string"}, "AccessDeniedException":{ "type":"structure", "members":{ - "message":{"shape":"ErrorMessage"} + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} }, "documentation":"

You do not have sufficient access to perform this action.

", "error":{"httpStatusCode":403}, "exception":true }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]{12}$" + }, + "AddProfilePermissionRequest":{ + "type":"structure", + "required":[ + "action", + "principal", + "statementId", + "profileName" + ], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

The human-readable name of the signing profile.

", + "location":"uri", + "locationName":"profileName" + }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The version of the signing profile.

" + }, + "action":{ + "shape":"String", + "documentation":"

The AWS Signer action permitted as part of cross-account permissions.

" + }, + "principal":{ + "shape":"String", + "documentation":"

The AWS principal receiving cross-account permissions. This may be an IAM role or another AWS account ID.

" + }, + "revisionId":{ + "shape":"String", + "documentation":"

A unique identifier for the current profile revision.

" + }, + "statementId":{ + "shape":"String", + "documentation":"

A unique identifier for the cross-account permission statement.

" + } + } + }, + "AddProfilePermissionResponse":{ + "type":"structure", + "members":{ + "revisionId":{ + "shape":"String", + "documentation":"

A unique identifier for the current profile revision.
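A short sketch of granting cross-account access with this operation follows; the profile name, the grantee account ID, the statement ID, and the signer:StartSigningJob action string are all placeholders chosen for illustration.

// Sketch: allow another account to start signing jobs with this profile.
import software.amazon.awssdk.services.signer.SignerClient;
import software.amazon.awssdk.services.signer.model.AddProfilePermissionRequest;
import software.amazon.awssdk.services.signer.model.AddProfilePermissionResponse;

public class AddProfilePermissionSketch {
    public static void main(String[] args) {
        try (SignerClient signer = SignerClient.create()) {
            AddProfilePermissionResponse response = signer.addProfilePermission(
                    AddProfilePermissionRequest.builder()
                            .profileName("my_signing_profile")
                            .action("signer:StartSigningJob")    // assumed action name
                            .principal("111122223333")           // account being granted access
                            .statementId("cross-account-signing")
                            .build());
            System.out.println("Permissions revision: " + response.revisionId());
        }
    }
}

RemoveProfilePermission reverses the grant; per the model it takes the same statementId together with the current revisionId.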

" + } + } + }, + "Arn":{ + "type":"string", + "max":2048, + "min":20 + }, "BadRequestException":{ "type":"structure", "members":{ - "message":{"shape":"ErrorMessage"} + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} }, "documentation":"

The request contains invalid parameters for the ARN or tags. This exception also occurs when you call a tagging API on a cancelled signing profile.

", "error":{"httpStatusCode":400}, @@ -240,8 +390,16 @@ }, "CertificateArn":{"type":"string"}, "ClientRequestToken":{"type":"string"}, - "CompletedAt":{"type":"timestamp"}, - "CreatedAt":{"type":"timestamp"}, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "documentation":"

The resource encountered a conflicting state.

", + "error":{"httpStatusCode":409}, + "exception":true + }, "DescribeSigningJobRequest":{ "type":"structure", "required":["jobId"], @@ -273,10 +431,18 @@ "shape":"PlatformId", "documentation":"

The microcontroller platform to which your signed code image will be distributed.

" }, + "platformDisplayName":{ + "shape":"DisplayName", + "documentation":"

A human-readable name for the signing platform associated with the signing job.

" + }, "profileName":{ "shape":"ProfileName", "documentation":"

The name of the profile that initiated the signing operation.

" }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The version of the signing profile used to initiate the signing job.

" + }, "overrides":{ "shape":"SigningPlatformOverrides", "documentation":"

A list of any overrides that were applied to the signing operation.

" @@ -286,13 +452,17 @@ "documentation":"

Map of user-assigned key-value pairs used during signing. These values contain any information that you specified for use in your signing job.

" }, "createdAt":{ - "shape":"CreatedAt", + "shape":"Timestamp", "documentation":"

Date and time that the signing job was created.

" }, "completedAt":{ - "shape":"CompletedAt", + "shape":"Timestamp", "documentation":"

Date and time that the signing job was completed.

" }, + "signatureExpiresAt":{ + "shape":"Timestamp", + "documentation":"

The expiration timestamp for the signature generated by the signing job.

" + }, "requestedBy":{ "shape":"RequestedBy", "documentation":"

The IAM principal that requested the signing job.

" @@ -305,9 +475,21 @@ "shape":"StatusReason", "documentation":"

String value that contains the status reason.

" }, + "revocationRecord":{ + "shape":"SigningJobRevocationRecord", + "documentation":"

A revocation record if the signature generated by the signing job has been revoked. Contains a timestamp and the ID of the IAM entity that revoked the signature.

" + }, "signedObject":{ "shape":"SignedObject", "documentation":"

Name of the S3 bucket where the signed code image is saved by code signing.

" + }, + "jobOwner":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the job owner.

" + }, + "jobInvoker":{ + "shape":"AccountId", + "documentation":"

The IAM entity that initiated the signing job.

" } } }, @@ -351,6 +533,7 @@ "type":"list", "member":{"shape":"EncryptionAlgorithm"} }, + "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, "GetSigningPlatformRequest":{ "type":"structure", @@ -398,6 +581,10 @@ "maxSizeInMB":{ "shape":"MaxSizeInMB", "documentation":"

The maximum size (in MB) of the payload that can be signed by the target platform.

" + }, + "revocationSupported":{ + "shape":"bool", + "documentation":"

A flag indicating whether signatures generated for the signing platform can be revoked.

" } } }, @@ -410,6 +597,12 @@ "documentation":"

The name of the target signing profile.

", "location":"uri", "locationName":"profileName" + }, + "profileOwner":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the profile owner.

", + "location":"querystring", + "locationName":"profileOwner" } } }, @@ -420,6 +613,15 @@ "shape":"ProfileName", "documentation":"

The name of the target signing profile.

" }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The current version of the signing profile.

" + }, + "profileVersionArn":{ + "shape":"Arn", + "documentation":"

The signing profile ARN, including the profile version.

" + }, + "revocationRecord":{"shape":"SigningProfileRevocationRecord"}, "signingMaterial":{ "shape":"SigningMaterial", "documentation":"

The ARN of the certificate that the target profile uses for signing operations.

" @@ -428,6 +630,11 @@ "shape":"PlatformId", "documentation":"

The ID of the platform that is used by the target signing profile.

" }, + "platformDisplayName":{ + "shape":"DisplayName", + "documentation":"

A human-readable name for the signing platform associated with the signing profile.

" + }, + "signatureValidityPeriod":{"shape":"SignatureValidityPeriod"}, "overrides":{ "shape":"SigningPlatformOverrides", "documentation":"

A list of overrides applied by the target signing profile for signing operations.

" @@ -440,6 +647,10 @@ "shape":"SigningProfileStatus", "documentation":"

The status of the target signing profile.

" }, + "statusReason":{ + "shape":"String", + "documentation":"

Reason for the status of the target signing profile.

" + }, "arn":{ "shape":"string", "documentation":"

The Amazon Resource Name (ARN) for the signing profile.

" @@ -491,10 +702,12 @@ "type":"list", "member":{"shape":"ImageFormat"} }, + "Integer":{"type":"integer"}, "InternalServiceErrorException":{ "type":"structure", "members":{ - "message":{"shape":"ErrorMessage"} + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} }, "documentation":"

An internal error occurred.

", "error":{"httpStatusCode":500}, @@ -502,6 +715,45 @@ }, "JobId":{"type":"string"}, "Key":{"type":"string"}, + "ListProfilePermissionsRequest":{ + "type":"structure", + "required":["profileName"], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

Name of the signing profile containing the cross-account permissions.

", + "location":"uri", + "locationName":"profileName" + }, + "nextToken":{ + "shape":"String", + "documentation":"

String for specifying the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListProfilePermissionsResponse":{ + "type":"structure", + "members":{ + "revisionId":{ + "shape":"String", + "documentation":"

The identifier for the current revision of profile permissions.

" + }, + "policySizeBytes":{ + "shape":"PolicySizeBytes", + "documentation":"

Total size of the policy associated with the Signing Profile in bytes.

" + }, + "permissions":{ + "shape":"Permissions", + "documentation":"

List of permissions associated with the Signing Profile.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

String for specifying the next set of paginated results.

" + } + } + }, "ListSigningJobsRequest":{ "type":"structure", "members":{ @@ -534,6 +786,30 @@ "documentation":"

String for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.

", "location":"querystring", "locationName":"nextToken" + }, + "isRevoked":{ + "shape":"bool", + "documentation":"

Filters results to return only signing jobs with revoked signatures.

", + "location":"querystring", + "locationName":"isRevoked" + }, + "signatureExpiresBefore":{ + "shape":"Timestamp", + "documentation":"

Filters results to return only signing jobs with signatures expiring before a specified timestamp.

", + "location":"querystring", + "locationName":"signatureExpiresBefore" + }, + "signatureExpiresAfter":{ + "shape":"Timestamp", + "documentation":"

Filters results to return only signing jobs with signatures expiring after a specified timestamp.

", + "location":"querystring", + "locationName":"signatureExpiresAfter" + }, + "jobInvoker":{ + "shape":"AccountId", + "documentation":"

Filters results to return only signing jobs initiated by a specified IAM entity.

", + "location":"querystring", + "locationName":"jobInvoker" } } }, @@ -618,6 +894,18 @@ "documentation":"

Value for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of nextToken from the response that you just received.

", "location":"querystring", "locationName":"nextToken" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

Filters results to return only signing jobs initiated for a specified signing platform.

", + "location":"querystring", + "locationName":"platformId" + }, + "statuses":{ + "shape":"Statuses", + "documentation":"

Filters results to return only signing jobs with statuses in the specified list.

", + "location":"querystring", + "locationName":"statuses" } } }, @@ -666,13 +954,41 @@ "NotFoundException":{ "type":"structure", "members":{ - "message":{"shape":"ErrorMessage"} + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} }, "documentation":"

The signing profile was not found.

", "error":{"httpStatusCode":404}, "exception":true }, + "Permission":{ + "type":"structure", + "members":{ + "action":{ + "shape":"String", + "documentation":"

An AWS Signer action permitted as part of cross-account permissions.

" + }, + "principal":{ + "shape":"String", + "documentation":"

The AWS principal that has been granted a cross-account permission.

" + }, + "statementId":{ + "shape":"String", + "documentation":"

A unique identifier for a cross-account permission statement.

" + }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The signing profile version that a permission applies to.

" + } + }, + "documentation":"

A cross-account permission for a signing profile.

" + }, + "Permissions":{ + "type":"list", + "member":{"shape":"Permission"} + }, "PlatformId":{"type":"string"}, + "PolicySizeBytes":{"type":"integer"}, "Prefix":{"type":"string"}, "ProfileName":{ "type":"string", @@ -680,11 +996,16 @@ "min":2, "pattern":"^[a-zA-Z0-9_]{2,}" }, + "ProfileVersion":{ + "type":"string", + "max":10, + "min":10, + "pattern":"^[a-zA-Z0-9]{10}$" + }, "PutSigningProfileRequest":{ "type":"structure", "required":[ "profileName", - "signingMaterial", "platformId" ], "members":{ @@ -698,6 +1019,10 @@ "shape":"SigningMaterial", "documentation":"

The AWS Certificate Manager certificate that will be used to sign code with the new signing profile.

" }, + "signatureValidityPeriod":{ + "shape":"SignatureValidityPeriod", + "documentation":"

The default validity period override for any signature generated using this signing profile. If unspecified, the default is 135 months.

" + }, "platformId":{ "shape":"PlatformId", "documentation":"

The ID of the signing platform to be created.

" @@ -722,6 +1047,51 @@ "arn":{ "shape":"string", "documentation":"

The Amazon Resource Name (ARN) of the signing profile created.

" + }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The version of the signing profile being created.

" + }, + "profileVersionArn":{ + "shape":"Arn", + "documentation":"

The signing profile ARN, including the profile version.
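To show how the new signatureValidityPeriod and profile version fields fit together, here is a sketch of PutSigningProfile; the profile name and the platform ID are placeholders (ListSigningPlatforms returns the real platform IDs).

// Sketch: create a profile whose signatures are valid for 12 months instead of
// the 135-month default noted above.
import software.amazon.awssdk.services.signer.SignerClient;
import software.amazon.awssdk.services.signer.model.PutSigningProfileRequest;
import software.amazon.awssdk.services.signer.model.PutSigningProfileResponse;
import software.amazon.awssdk.services.signer.model.SignatureValidityPeriod;
import software.amazon.awssdk.services.signer.model.ValidityType;

public class PutSigningProfileSketch {
    public static void main(String[] args) {
        try (SignerClient signer = SignerClient.create()) {
            PutSigningProfileResponse response = signer.putSigningProfile(PutSigningProfileRequest.builder()
                    .profileName("my_signing_profile")
                    .platformId("ExamplePlatformId")             // placeholder; discover via ListSigningPlatforms
                    .signatureValidityPeriod(SignatureValidityPeriod.builder()
                            .value(12)
                            .type(ValidityType.MONTHS)
                            .build())
                    .build());
            System.out.println("Created " + response.profileVersionArn());
        }
    }
}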

" + } + } + }, + "RemoveProfilePermissionRequest":{ + "type":"structure", + "required":[ + "revisionId", + "profileName", + "statementId" + ], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

A human-readable name for the signing profile with permissions to be removed.

", + "location":"uri", + "locationName":"profileName" + }, + "revisionId":{ + "shape":"String", + "documentation":"

An identifier for the current revision of the signing profile permissions.

", + "location":"querystring", + "locationName":"revisionId" + }, + "statementId":{ + "shape":"String", + "documentation":"

A unique identifier for the cross-account permissions statement.

", + "location":"uri", + "locationName":"statementId" + } + } + }, + "RemoveProfilePermissionResponse":{ + "type":"structure", + "members":{ + "revisionId":{ + "shape":"String", + "documentation":"

An identifier for the current revision of the profile permissions.

" } } }, @@ -729,12 +1099,70 @@ "ResourceNotFoundException":{ "type":"structure", "members":{ - "message":{"shape":"ErrorMessage"} + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} }, "documentation":"

A specified resource could not be found.

", "error":{"httpStatusCode":404}, "exception":true }, + "RevocationReasonString":{ + "type":"string", + "max":500, + "min":1 + }, + "RevokeSignatureRequest":{ + "type":"structure", + "required":[ + "reason", + "jobId" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

ID of the signing job to be revoked.

", + "location":"uri", + "locationName":"jobId" + }, + "jobOwner":{ + "shape":"AccountId", + "documentation":"

AWS account ID of the job owner.

" + }, + "reason":{ + "shape":"RevocationReasonString", + "documentation":"

The reason for revoking the signing job.

" + } + } + }, + "RevokeSigningProfileRequest":{ + "type":"structure", + "required":[ + "profileVersion", + "reason", + "effectiveTime", + "profileName" + ], + "members":{ + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the signing profile to be revoked.

", + "location":"uri", + "locationName":"profileName" + }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The version of the signing profile to be revoked.

" + }, + "reason":{ + "shape":"RevocationReasonString", + "documentation":"

The reason for revoking a signing profile.

" + }, + "effectiveTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp for when revocation of a Signing Profile should become effective. Signatures generated using the signing profile after this timestamp are not trusted.
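A brief sketch of revoking a profile version follows; the profile name, version string, and reason are placeholders, and effectiveTime is expressed as a java.time.Instant, which is how the SDK represents Timestamp shapes.

// Sketch: revoke a profile version so that signatures produced after the
// effective time are no longer trusted.
import java.time.Instant;
import software.amazon.awssdk.services.signer.SignerClient;
import software.amazon.awssdk.services.signer.model.RevokeSigningProfileRequest;

public class RevokeSigningProfileSketch {
    public static void main(String[] args) {
        try (SignerClient signer = SignerClient.create()) {
            signer.revokeSigningProfile(RevokeSigningProfileRequest.builder()
                    .profileName("my_signing_profile")
                    .profileVersion("AbCdEf1234")        // placeholder 10-character profile version
                    .reason("Signing key compromised")
                    .effectiveTime(Instant.now())        // signatures made before this time stay trusted
                    .build());
        }
    }
}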

" + } + } + }, "S3Destination":{ "type":"structure", "members":{ @@ -757,7 +1185,7 @@ "documentation":"

Name of the S3 bucket.

" }, "key":{ - "shape":"key", + "shape":"Key", "documentation":"

Key name that uniquely identifies a signed code image in your bucket.

" } }, @@ -786,6 +1214,30 @@ }, "documentation":"

Information about the S3 bucket where you saved your unsigned code.

" }, + "ServiceLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "documentation":"

The client is making a request that exceeds service limits.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "SignatureValidityPeriod":{ + "type":"structure", + "members":{ + "value":{ + "shape":"Integer", + "documentation":"

The numerical value of the time unit for signature validity.

" + }, + "type":{ + "shape":"ValidityType", + "documentation":"

The time unit for signature validity.

" + } + }, + "documentation":"

The validity period for a signing job.

" + }, "SignedObject":{ "type":"structure", "members":{ @@ -866,16 +1318,66 @@ "documentation":"

A SigningMaterial object that contains the Amazon Resource Name (ARN) of the certificate used for the signing job.

" }, "createdAt":{ - "shape":"CreatedAt", + "shape":"Timestamp", "documentation":"

The date and time that the signing job was created.

" }, "status":{ "shape":"SigningStatus", "documentation":"

The status of the signing job.

" + }, + "isRevoked":{ + "shape":"bool", + "documentation":"

Indicates whether the signing job is revoked.

" + }, + "profileName":{ + "shape":"ProfileName", + "documentation":"

The name of the signing profile that created a signing job.

" + }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The version of the signing profile that created a signing job.

" + }, + "platformId":{ + "shape":"PlatformId", + "documentation":"

The unique identifier for a signing platform.

" + }, + "platformDisplayName":{ + "shape":"DisplayName", + "documentation":"

The name of a signing platform.

" + }, + "signatureExpiresAt":{ + "shape":"Timestamp", + "documentation":"

The time when the signature of a signing job expires.

" + }, + "jobOwner":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the job owner.

" + }, + "jobInvoker":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the job invoker.

" } }, "documentation":"

Contains information about a signing job.

" }, + "SigningJobRevocationRecord":{ + "type":"structure", + "members":{ + "reason":{ + "shape":"String", + "documentation":"

A caller-supplied reason for revocation.

" + }, + "revokedAt":{ + "shape":"Timestamp", + "documentation":"

The time of revocation.

" + }, + "revokedBy":{ + "shape":"String", + "documentation":"

The identity of the revoker.

" + } + }, + "documentation":"

Revocation information for a signing job.

" + }, "SigningJobs":{ "type":"list", "member":{"shape":"SigningJob"} @@ -929,6 +1431,10 @@ "maxSizeInMB":{ "shape":"MaxSizeInMB", "documentation":"

The maximum size (in MB) of code that can be signed by a code signing platform.

" + }, + "revocationSupported":{ + "shape":"bool", + "documentation":"

Indicates whether revocation is supported for the platform.

" } }, "documentation":"

Contains information about the signing configurations and parameters that are used to perform a code signing job.

" @@ -958,14 +1464,30 @@ "shape":"ProfileName", "documentation":"

The name of the signing profile.

" }, + "profileVersion":{ + "shape":"ProfileVersion", + "documentation":"

The version of a signing profile.

" + }, + "profileVersionArn":{ + "shape":"Arn", + "documentation":"

The ARN of a signing profile, including the profile version.

" + }, "signingMaterial":{ "shape":"SigningMaterial", "documentation":"

The ACM certificate that is available for use by a signing profile.

" }, + "signatureValidityPeriod":{ + "shape":"SignatureValidityPeriod", + "documentation":"

The validity period for a signing job created using this signing profile.

" + }, "platformId":{ "shape":"PlatformId", "documentation":"

The ID of a platform that is available for use by a signing profile.

" }, + "platformDisplayName":{ + "shape":"DisplayName", + "documentation":"

The name of the signing platform.

" + }, "signingParameters":{ "shape":"SigningParameters", "documentation":"

The parameters that are available for use by a code signing user.

" @@ -985,11 +1507,30 @@ }, "documentation":"

Contains information about the ACM certificates and code signing configuration parameters that can be used by a given code signing user.

" }, + "SigningProfileRevocationRecord":{ + "type":"structure", + "members":{ + "revocationEffectiveFrom":{ + "shape":"Timestamp", + "documentation":"

The time when revocation becomes effective.

" + }, + "revokedAt":{ + "shape":"Timestamp", + "documentation":"

The time when the signing profile was revoked.

" + }, + "revokedBy":{ + "shape":"String", + "documentation":"

The identity of the revoker.

" + } + }, + "documentation":"

Revocation information for a signing profile.

" + }, "SigningProfileStatus":{ "type":"string", "enum":[ "Active", - "Canceled" + "Canceled", + "Revoked" ] }, "SigningProfiles":{ @@ -1019,6 +1560,7 @@ "required":[ "source", "destination", + "profileName", "clientRequestToken" ], "members":{ @@ -1038,6 +1580,10 @@ "shape":"ClientRequestToken", "documentation":"

String that identifies the signing request. All calls after the first that use this token return the same response as the first call.

", "idempotencyToken":true + }, + "profileOwner":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the signing profile owner.
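A hedged sketch of starting a signing job against a shared signing profile with the regenerated Java client. Bucket names, keys, the profile coordinates, and the account ID are placeholders, and the S3 source/destination sub-shapes are assumed to match the existing service model:

import java.util.UUID;
import software.amazon.awssdk.services.signer.SignerClient;
import software.amazon.awssdk.services.signer.model.Destination;
import software.amazon.awssdk.services.signer.model.S3Destination;
import software.amazon.awssdk.services.signer.model.S3Source;
import software.amazon.awssdk.services.signer.model.Source;
import software.amazon.awssdk.services.signer.model.StartSigningJobRequest;
import software.amazon.awssdk.services.signer.model.StartSigningJobResponse;

public class StartSigningJobSketch {
    public static void main(String[] args) {
        try (SignerClient signer = SignerClient.create()) {
            StartSigningJobResponse response = signer.startSigningJob(StartSigningJobRequest.builder()
                    .source(Source.builder()
                            .s3(S3Source.builder()
                                    .bucketName("my-unsigned-code")   // placeholder bucket
                                    .key("code.zip")                  // placeholder key
                                    .version("objectVersionId")       // placeholder S3 object version
                                    .build())
                            .build())
                    .destination(Destination.builder()
                            .s3(S3Destination.builder()
                                    .bucketName("my-signed-code")     // placeholder bucket
                                    .prefix("signed/")
                                    .build())
                            .build())
                    .profileName("myLambdaProfile")                   // now a required member
                    .profileOwner("111122223333")                     // owner account ID, for cross-account profiles
                    .clientRequestToken(UUID.randomUUID().toString()) // idempotency token
                    .build());

            System.out.println("Job ID: " + response.jobId() + ", owner: " + response.jobOwner());
        }
    }
}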

" } } }, @@ -1047,10 +1593,18 @@ "jobId":{ "shape":"JobId", "documentation":"

The ID of your signing job.

" + }, + "jobOwner":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the signing job owner.

" } } }, "StatusReason":{"type":"string"}, + "Statuses":{ + "type":"list", + "member":{"shape":"SigningProfileStatus"} + }, "String":{"type":"string"}, "TagKey":{ "type":"string", @@ -1102,9 +1656,23 @@ "ThrottlingException":{ "type":"structure", "members":{ - "message":{"shape":"ErrorMessage"} + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} + }, + "documentation":"

The request was denied due to request throttling.

Instead of this error, TooManyRequestsException should be used.

", + "deprecated":true, + "deprecatedMessage":"Instead of this error, TooManyRequestsException should be used.", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} }, - "documentation":"

The signing job has been throttled.

", + "documentation":"

The allowed number of job-signing requests has been exceeded.

This error supersedes the error ThrottlingException.

", "error":{"httpStatusCode":429}, "exception":true }, @@ -1137,15 +1705,24 @@ "ValidationException":{ "type":"structure", "members":{ - "message":{"shape":"ErrorMessage"} + "message":{"shape":"ErrorMessage"}, + "code":{"shape":"ErrorCode"} }, "documentation":"

Your signing certificate could not be validated.

", "error":{"httpStatusCode":400}, "exception":true }, + "ValidityType":{ + "type":"string", + "enum":[ + "DAYS", + "MONTHS", + "YEARS" + ] + }, "Version":{"type":"string"}, "bool":{"type":"boolean"}, "string":{"type":"string"} }, - "documentation":"

With code signing for IoT, you can sign code that you create for any IoT device that is supported by Amazon Web Services (AWS). Code signing is available through Amazon FreeRTOS and AWS IoT Device Management, and integrated with AWS Certificate Manager (ACM). In order to sign code, you import a third-party code signing certificate with ACM that is used to sign updates in Amazon FreeRTOS and AWS IoT Device Management. For general information about using code signing, see the Code Signing for IoT Developer Guide.

" + "documentation":"

AWS Signer is a fully managed code signing service to help you ensure the trust and integrity of your code.

AWS Signer supports the following applications:

With code signing for AWS Lambda, you can sign AWS Lambda deployment packages. Integrated support is provided for Amazon S3, Amazon CloudWatch, and AWS CloudTrail. In order to sign code, you create a signing profile and then use Signer to sign Lambda zip files in S3.

With code signing for IoT, you can sign code for any IoT device that is supported by AWS. IoT code signing is available for Amazon FreeRTOS and AWS IoT Device Management, and is integrated with AWS Certificate Manager (ACM). In order to sign code, you import a third-party code signing certificate using ACM, and use that to sign updates in Amazon FreeRTOS and AWS IoT Device Management.

For more information about AWS Signer, see the AWS Signer Developer Guide.

" } diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 198c16ae2ca2..0de1b9874caa 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index 983fe22ab90a..38114a30b278 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 9af07065ba80..739e4e5f3f90 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sns/src/main/resources/codegen-resources/service-2.json b/services/sns/src/main/resources/codegen-resources/service-2.json index 834d2866eca7..977b5d984635 100755 --- a/services/sns/src/main/resources/codegen-resources/service-2.json +++ b/services/sns/src/main/resources/codegen-resources/service-2.json @@ -126,7 +126,7 @@ {"shape":"TagPolicyException"}, {"shape":"ConcurrentAccessException"} ], - "documentation":"

Creates a topic to which notifications can be published. Users can create at most 100,000 topics. For more information, see https://aws.amazon.com/sns. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic.

" + "documentation":"

Creates a topic to which notifications can be published. Users can create at most 100,000 standard topics (at most 1,000 FIFO topics). For more information, see https://aws.amazon.com/sns. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic.

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -511,7 +511,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Use this request to set the default settings for sending SMS messages and receiving daily SMS usage reports.

You can override some of these settings for a single message when you use the Publish action with the MessageAttributes.entry.N parameter. For more information, see Sending an SMS Message in the Amazon SNS Developer Guide.

" + "documentation":"

Use this request to set the default settings for sending SMS messages and receiving daily SMS usage reports.

You can override some of these settings for a single message when you use the Publish action with the MessageAttributes.entry.N parameter. For more information, see Publishing to a mobile phone in the Amazon SNS Developer Guide.

" }, "SetSubscriptionAttributes":{ "name":"SetSubscriptionAttributes", @@ -565,7 +565,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidSecurityException"} ], - "documentation":"

Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same AWS account, the endpoint owner must the ConfirmSubscription action to confirm the subscription.

You call the ConfirmSubscription action with the token from the subscription response. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).

" + "documentation":"

Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same AWS account, the endpoint owner must run the ConfirmSubscription action to confirm the subscription.

You call the ConfirmSubscription action with the token from the subscription response. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).
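A brief, illustrative sketch of this flow with the Java SDK. The topic ARN and endpoint are placeholders; for HTTP/S endpoints the subscription remains pending until the endpoint owner confirms it with ConfirmSubscription:

import software.amazon.awssdk.services.sns.SnsClient;
import software.amazon.awssdk.services.sns.model.SubscribeRequest;
import software.amazon.awssdk.services.sns.model.SubscribeResponse;

public class SubscribeSketch {
    public static void main(String[] args) {
        try (SnsClient sns = SnsClient.create()) {
            SubscribeResponse response = sns.subscribe(SubscribeRequest.builder()
                    .topicArn("arn:aws:sns:us-east-1:111122223333:my-topic") // placeholder topic ARN
                    .protocol("https")
                    .endpoint("https://example.com/sns-handler")             // placeholder endpoint
                    .returnSubscriptionArn(true)
                    .build());

            // For HTTP/S endpoints the subscription stays "pending confirmation" until the
            // endpoint owner calls ConfirmSubscription with the token SNS delivers to it.
            System.out.println(response.subscriptionArn());
        }
    }
}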

" }, "TagResource":{ "name":"TagResource", @@ -820,11 +820,11 @@ "members":{ "Name":{ "shape":"topicName", - "documentation":"

The name of the topic you want to create.

Constraints: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.

" + "documentation":"

The name of the topic you want to create.

Constraints: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.

For a FIFO (first-in-first-out) topic, the name must end with the .fifo suffix.

" }, "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • FifoTopic – Set to true to create a FIFO topic.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

The following attributes apply only to FIFO topics:

  • FifoTopic – When this is set to true, a FIFO topic is created.

  • ContentBasedDeduplication – Enables content-based deduplication for FIFO topics.

    • By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action.

    • When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action.
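A minimal sketch of creating a FIFO topic with these attributes from the Java SDK; the topic name is a placeholder and must end with the .fifo suffix:

import java.util.HashMap;
import java.util.Map;
import software.amazon.awssdk.services.sns.SnsClient;
import software.amazon.awssdk.services.sns.model.CreateTopicRequest;

public class CreateFifoTopicSketch {
    public static void main(String[] args) {
        try (SnsClient sns = SnsClient.create()) {
            Map<String, String> attributes = new HashMap<>();
            attributes.put("FifoTopic", "true");
            // Let SNS derive the MessageDeduplicationId from a SHA-256 hash of the message body.
            attributes.put("ContentBasedDeduplication", "true");

            String topicArn = sns.createTopic(CreateTopicRequest.builder()
                    .name("my-orders-topic.fifo")   // placeholder; FIFO topic names must end with .fifo
                    .attributes(attributes)
                    .build())
                    .topicArn();

            System.out.println(topicArn);
        }
    }
}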

" }, "Tags":{ "shape":"TagList", @@ -1021,7 +1021,7 @@ "members":{ "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"

A map of the topic's attributes. Attributes in this map include the following:

  • DeliveryPolicy – The JSON serialization of the topic's delivery policy.

  • DisplayName – The human-readable name used in the From field for notifications to email and email-json endpoints.

  • Owner – The AWS account ID of the topic's owner.

  • Policy – The JSON serialization of the topic's access control policy.

  • SubscriptionsConfirmed – The number of confirmed subscriptions for the topic.

  • SubscriptionsDeleted – The number of deleted subscriptions for the topic.

  • SubscriptionsPending – The number of subscriptions pending confirmation for the topic.

  • TopicArn – The topic's ARN.

  • EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy, taking system defaults into account.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

" + "documentation":"

A map of the topic's attributes. Attributes in this map include the following:

  • DeliveryPolicy – The JSON serialization of the topic's delivery policy.

  • DisplayName – The human-readable name used in the From field for notifications to email and email-json endpoints.

  • Owner – The AWS account ID of the topic's owner.

  • Policy – The JSON serialization of the topic's access control policy.

  • SubscriptionsConfirmed – The number of confirmed subscriptions for the topic.

  • SubscriptionsDeleted – The number of deleted subscriptions for the topic.

  • SubscriptionsPending – The number of subscriptions pending confirmation for the topic.

  • TopicArn – The topic's ARN.

  • EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy, taking system defaults into account.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

The following attributes apply only to FIFO topics:

  • FifoTopic – When this is set to true, a FIFO topic is created.

  • ContentBasedDeduplication – Enables content-based deduplication for FIFO topics.

    • By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action.

    • When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action.

" } }, "documentation":"

Response for GetTopicAttributes action.

" @@ -1376,7 +1376,7 @@ "documentation":"

Binary type attributes can store any binary data, for example, compressed data, encrypted data, or images.

" } }, - "documentation":"

The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see Publish.

Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes). For more information, see Using Amazon SNS Message Attributes.

" + "documentation":"

The user-specified message attribute value. For string data types, the value attribute has the same restrictions on the content as the message body. For more information, see Publish.

Name, type, and value must not be empty or null. In addition, the message body should not be empty or null. All parts of the message attribute, including name, type, and value, are included in the message size restriction, which is currently 256 KB (262,144 bytes). For more information, see Amazon SNS message attributes and Publishing to a mobile phone in the Amazon SNS Developer Guide.

" }, "NotFoundException":{ "type":"structure", @@ -1474,6 +1474,14 @@ "MessageAttributes":{ "shape":"MessageAttributeMap", "documentation":"

Message attributes for Publish action.

" + }, + "MessageDeduplicationId":{ + "shape":"String", + "documentation":"

This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any message sent with the same MessageDeduplicationId during the 5-minute deduplication interval is treated as a duplicate.

If the topic has ContentBasedDeduplication set, the system generates a MessageDeduplicationId based on the contents of the message. Your MessageDeduplicationId overrides the generated one.

" + }, + "MessageGroupId":{ + "shape":"String", + "documentation":"

This parameter applies only to FIFO (first-in-first-out) topics. The MessageGroupId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

The MessageGroupId is a tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). Every message must include a MessageGroupId.
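A minimal sketch of publishing to a FIFO topic with these parameters from the Java SDK. The topic ARN, group ID, and deduplication ID are placeholders, and messageGroupId/messageDeduplicationId are the builder methods the regenerated client is expected to expose for the members added here:

import software.amazon.awssdk.services.sns.SnsClient;
import software.amazon.awssdk.services.sns.model.PublishRequest;
import software.amazon.awssdk.services.sns.model.PublishResponse;

public class PublishFifoSketch {
    public static void main(String[] args) {
        try (SnsClient sns = SnsClient.create()) {
            PublishResponse response = sns.publish(PublishRequest.builder()
                    .topicArn("arn:aws:sns:us-east-1:111122223333:my-orders-topic.fifo") // placeholder
                    .message("Order 1234 shipped")
                    .messageGroupId("order-1234")            // required for FIFO topics
                    .messageDeduplicationId("1234-shipped")  // optional if ContentBasedDeduplication is enabled
                    .build());

            System.out.println(response.messageId() + " / " + response.sequenceNumber());
        }
    }
}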

" } }, "documentation":"

Input for Publish action.

" @@ -1484,6 +1492,10 @@ "MessageId":{ "shape":"messageId", "documentation":"

Unique identifier assigned to the published message.

Length Constraint: Maximum 100 characters

" + }, + "SequenceNumber":{ + "shape":"String", + "documentation":"

This response element applies only to FIFO (first-in-first-out) topics.

The sequence number is a large, non-consecutive number that Amazon SNS assigns to each message. The length of SequenceNumber is 128 bits. SequenceNumber continues to increase for each MessageGroupId.

" } }, "documentation":"

Response for Publish action.

" @@ -1585,7 +1597,7 @@ }, "AttributeName":{ "shape":"attributeName", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.

  • RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that this action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic.

  • RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

" }, "AttributeValue":{ "shape":"attributeValue", @@ -1607,7 +1619,7 @@ }, "AttributeName":{ "shape":"attributeName", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

The following attribute applies only to FIFO topics:

  • ContentBasedDeduplication – Enables content-based deduplication for FIFO topics.

    • By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action.

    • When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action.

" }, "AttributeValue":{ "shape":"attributeValue", @@ -1904,5 +1916,5 @@ "topicARN":{"type":"string"}, "topicName":{"type":"string"} }, - "documentation":"Amazon Simple Notification Service

Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product see https://aws.amazon.com/sns. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide.

We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as: cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services.

" + "documentation":"Amazon Simple Notification Service

Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product see https://aws.amazon.com/sns. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide.

For information on the permissions you need to use this API, see Identity and access management in Amazon SNS in the Amazon SNS Developer Guide.

We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as: cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services.

" } diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 167f0b60662c..d8c204b5e88a 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index aaaa6df1b2c7..6619e4936ebb 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssm/src/main/resources/codegen-resources/service-2.json b/services/ssm/src/main/resources/codegen-resources/service-2.json index 1bf0a0992d3a..e6817055cd36 100644 --- a/services/ssm/src/main/resources/codegen-resources/service-2.json +++ b/services/ssm/src/main/resources/codegen-resources/service-2.json @@ -1129,7 +1129,7 @@ {"shape":"InvalidNextToken"}, {"shape":"InvalidKeyId"} ], - "documentation":"

Query a list of all parameters used by the AWS account.

" + "documentation":"

Retrieves the history of all changes to a parameter.
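For example, an illustrative sketch with the Java SDK; the parameter name is a placeholder, and long histories may span multiple response pages:

import software.amazon.awssdk.services.ssm.SsmClient;
import software.amazon.awssdk.services.ssm.model.GetParameterHistoryResponse;
import software.amazon.awssdk.services.ssm.model.ParameterHistory;

public class ParameterHistorySketch {
    public static void main(String[] args) {
        try (SsmClient ssm = SsmClient.create()) {
            // Returns one page of versions; follow NextToken for longer histories.
            GetParameterHistoryResponse history = ssm.getParameterHistory(r -> r
                    .name("/my/app/db-password")   // placeholder parameter name
                    .withDecryption(true));

            for (ParameterHistory version : history.parameters()) {
                System.out.println(version.version() + " -> " + version.value());
            }
        }
    }
}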

" }, "GetParameters":{ "name":"GetParameters", @@ -2432,7 +2432,7 @@ "members":{ "key":{ "shape":"AssociationFilterKey", - "documentation":"

The name of the filter.

" + "documentation":"

The name of the filter.

InstanceId has been deprecated.

" }, "value":{ "shape":"AssociationFilterValue", @@ -2904,7 +2904,7 @@ "members":{ "Key":{ "shape":"AutomationExecutionFilterKey", - "documentation":"

One or more keys to limit the results. Valid filter keys include the following: DocumentNamePrefix, ExecutionStatus, ExecutionId, ParentExecutionId, CurrentAction, StartTimeBefore, StartTimeAfter.

" + "documentation":"

One or more keys to limit the results. Valid filter keys include the following: DocumentNamePrefix, ExecutionStatus, ExecutionId, ParentExecutionId, CurrentAction, StartTimeBefore, StartTimeAfter, TargetResourceGroup.

" }, "Values":{ "shape":"AutomationExecutionFilterValueList", @@ -2924,7 +2924,8 @@ "StartTimeBefore", "StartTimeAfter", "AutomationType", - "TagKey" + "TagKey", + "TargetResourceGroup" ] }, "AutomationExecutionFilterList":{ @@ -7149,7 +7150,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The name of a parameter you want to query.

" + "documentation":"

The name of the parameter for which you want to review history.

" }, "WithDecryption":{ "shape":"Boolean", @@ -8845,7 +8846,7 @@ "members":{ "AssociationFilterList":{ "shape":"AssociationFilterList", - "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" + "documentation":"

One or more filters. Use a filter to return a more specific list of results.

Filtering associations using the InstanceID attribute only returns legacy associations created using the InstanceID attribute. Associations targeting the instance that are part of the Target Attributes ResourceGroup or Tags are not returned.

" }, "MaxResults":{ "shape":"MaxResults", @@ -10800,7 +10801,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

The parameter exceeded the maximum number of allowed versions.

", + "documentation":"

Parameter Store retains the 100 most recently created versions of a parameter. After this number of versions has been created, Parameter Store deletes the oldest version when a new one is created. However, if the oldest version has a label attached to it, Parameter Store will not delete the version and instead presents this error message:

An error occurred (ParameterMaxVersionLimitExceeded) when calling the PutParameter operation: You attempted to create a new version of parameter-name by calling the PutParameter API with the overwrite flag. Version version-number, the oldest version, can't be deleted because it has a label associated with it. Move the label to another version of the parameter, and try again.

This safeguard is to prevent parameter versions with mission critical labels assigned to them from being deleted. To continue creating new parameters, first move the label from the oldest version of the parameter to a newer one for use in your operations. For information about moving parameter labels, see Move a parameter label (console) or Move a parameter label (CLI) in the AWS Systems Manager User Guide.
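A hedged sketch of that remediation, using the existing LabelParameterVersion operation to move the label to a newer version; the parameter name, label, and version number are placeholders:

import software.amazon.awssdk.services.ssm.SsmClient;
import software.amazon.awssdk.services.ssm.model.LabelParameterVersionResponse;

public class MoveParameterLabelSketch {
    public static void main(String[] args) {
        try (SsmClient ssm = SsmClient.create()) {
            // Applying an existing label to a newer version moves it off the oldest version,
            // after which PutParameter with the overwrite flag can rotate versions again.
            LabelParameterVersionResponse response = ssm.labelParameterVersion(r -> r
                    .name("/my/app/db-password")   // placeholder parameter name
                    .parameterVersion(42L)         // placeholder: a newer version to carry the label
                    .labels("Production"));        // placeholder label

            System.out.println("Label now on version " + response.parameterVersion());
        }
    }
}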

", "exception":true }, "ParameterMetadata":{ @@ -11025,7 +11026,7 @@ "members":{ "Id":{ "shape":"PatchId", - "documentation":"

The ID of the patch (this is different than the Microsoft Knowledge Base ID).

" + "documentation":"

The ID of the patch. Applies to Windows patches only.

This ID is not the same as the Microsoft Knowledge Base ID.

" }, "ReleaseDate":{ "shape":"DateTime", @@ -11049,31 +11050,71 @@ }, "ProductFamily":{ "shape":"PatchProductFamily", - "documentation":"

The product family the patch is applicable for (for example, Windows).

" + "documentation":"

The product family the patch is applicable for. For example, Windows or Amazon Linux 2.

" }, "Product":{ "shape":"PatchProduct", - "documentation":"

The specific product the patch is applicable for (for example, WindowsServer2016).

" + "documentation":"

The specific product the patch is applicable for. For example, WindowsServer2016 or AmazonLinux2018.03.

" }, "Classification":{ "shape":"PatchClassification", - "documentation":"

The classification of the patch (for example, SecurityUpdates, Updates, CriticalUpdates).

" + "documentation":"

The classification of the patch. For example, SecurityUpdates, Updates, or CriticalUpdates.

" }, "MsrcSeverity":{ "shape":"PatchMsrcSeverity", - "documentation":"

The severity of the patch (for example Critical, Important, Moderate).

" + "documentation":"

The severity of the patch, such as Critical, Important, or Moderate. Applies to Windows patches only.

" }, "KbNumber":{ "shape":"PatchKbNumber", - "documentation":"

The Microsoft Knowledge Base ID of the patch.

" + "documentation":"

The Microsoft Knowledge Base ID of the patch. Applies to Windows patches only.

" }, "MsrcNumber":{ "shape":"PatchMsrcNumber", - "documentation":"

The ID of the MSRC bulletin the patch is related to.

" + "documentation":"

The ID of the Microsoft Security Response Center (MSRC) bulletin the patch is related to. For example, MS14-045. Applies to Windows patches only.

" }, "Language":{ "shape":"PatchLanguage", "documentation":"

The language of the patch if it's language-specific.

" + }, + "AdvisoryIds":{ + "shape":"PatchAdvisoryIdList", + "documentation":"

The Advisory ID of the patch. For example, RHSA-2020:3779. Applies to Linux-based instances only.

" + }, + "BugzillaIds":{ + "shape":"PatchBugzillaIdList", + "documentation":"

The Bugzilla ID of the patch. For example, 1600646. Applies to Linux-based instances only.

" + }, + "CVEIds":{ + "shape":"PatchCVEIdList", + "documentation":"

The Common Vulnerabilities and Exposures (CVE) ID of the patch. For example, CVE-1999-0067. Applies to Linux-based instances only.

" + }, + "Name":{ + "shape":"PatchName", + "documentation":"

The name of the patch. Applies to Linux-based instances only.

" + }, + "Epoch":{ + "shape":"PatchEpoch", + "documentation":"

The epoch of the patch. For example, in pkg-example-EE-20180914-2.2.amzn1.noarch, the epoch value is 20180914-2. Applies to Linux-based instances only.

" + }, + "Version":{ + "shape":"PatchVersion", + "documentation":"

The version number of the patch. For example, in example-pkg-1.710.10-2.7.abcd.x86_64, the version number is indicated by -1. Applies to Linux-based instances only.

" + }, + "Release":{ + "shape":"PatchRelease", + "documentation":"

The particular release of a patch. For example, in pkg-example-EE-20180914-2.2.amzn1.noarch, the release is 2.amzn1. Applies to Linux-based instances only.

" + }, + "Arch":{ + "shape":"PatchArch", + "documentation":"

The architecture of the patch. For example, in example-pkg-0.710.10-2.7.abcd.x86_64, the architecture is indicated by x86_64. Applies to Linux-based instances only.

" + }, + "Severity":{ + "shape":"PatchSeverity", + "documentation":"

The severity level of the patch. For example, CRITICAL or MODERATE.

" + }, + "Repository":{ + "shape":"PatchRepository", + "documentation":"

The source patch repository for the operating system and version, such as trusty-security for Ubuntu Server 14.04 LTS and focal-security for Ubuntu Server 20.04 LTS. Applies to Linux-based instances only.

" } }, "documentation":"

Represents metadata about a patch.

" @@ -11085,6 +11126,12 @@ "BLOCK" ] }, + "PatchAdvisoryId":{"type":"string"}, + "PatchAdvisoryIdList":{ + "type":"list", + "member":{"shape":"PatchAdvisoryId"} + }, + "PatchArch":{"type":"string"}, "PatchBaselineIdentity":{ "type":"structure", "members":{ @@ -11120,6 +11167,17 @@ "max":100, "min":1 }, + "PatchBugzillaId":{"type":"string"}, + "PatchBugzillaIdList":{ + "type":"list", + "member":{"shape":"PatchBugzillaId"} + }, + "PatchCVEId":{"type":"string"}, + "PatchCVEIdList":{ + "type":"list", + "member":{"shape":"PatchCVEId"} + }, + "PatchCVEIds":{"type":"string"}, "PatchClassification":{"type":"string"}, "PatchComplianceData":{ "type":"structure", @@ -11155,6 +11213,10 @@ "InstalledTime":{ "shape":"DateTime", "documentation":"

The date/time the patch was installed on the instance. Note that not all operating systems provide this level of information.

" + }, + "CVEIds":{ + "shape":"PatchCVEIds", + "documentation":"

The IDs of one or more Common Vulnerabilities and Exposures (CVE) issues that are resolved by the patch.

" } }, "documentation":"

Information about the state of a patch on a particular instance as it relates to the patch baseline used to patch the instance.

" @@ -11202,6 +11264,7 @@ ] }, "PatchDescription":{"type":"string"}, + "PatchEpoch":{"type":"integer"}, "PatchFailedCount":{"type":"integer"}, "PatchFilter":{ "type":"structure", @@ -11235,15 +11298,25 @@ "PatchFilterKey":{ "type":"string", "enum":[ + "ARCH", + "ADVISORY_ID", + "BUGZILLA_ID", "PATCH_SET", "PRODUCT", "PRODUCT_FAMILY", "CLASSIFICATION", + "CVE_ID", + "EPOCH", "MSRC_SEVERITY", + "NAME", "PATCH_ID", "SECTION", "PRIORITY", - "SEVERITY" + "REPOSITORY", + "RELEASE", + "SEVERITY", + "SECURITY", + "VERSION" ] }, "PatchFilterList":{ @@ -11315,6 +11388,7 @@ "PatchMissingCount":{"type":"integer"}, "PatchMsrcNumber":{"type":"string"}, "PatchMsrcSeverity":{"type":"string"}, + "PatchName":{"type":"string"}, "PatchNotApplicableCount":{"type":"integer"}, "PatchOperationType":{ "type":"string", @@ -11379,6 +11453,8 @@ "key":{"shape":"AttributeName"}, "value":{"shape":"AttributeValue"} }, + "PatchRelease":{"type":"string"}, + "PatchRepository":{"type":"string"}, "PatchRule":{ "type":"structure", "required":["PatchFilterGroup"], @@ -11510,6 +11586,7 @@ "PatchTitle":{"type":"string"}, "PatchUnreportedNotApplicableCount":{"type":"integer"}, "PatchVendor":{"type":"string"}, + "PatchVersion":{"type":"string"}, "PingStatus":{ "type":"string", "enum":[ @@ -11658,7 +11735,7 @@ }, "Type":{ "shape":"ParameterType", - "documentation":"

The type of parameter that you want to add to the system.

SecureString is not currently supported for AWS CloudFormation templates or in the China Regions.

Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

Specifying a parameter type is not required when updating a parameter. You must specify a parameter type when creating a parameter.

" + "documentation":"

The type of parameter that you want to add to the system.

SecureString is not currently supported for AWS CloudFormation templates.

Items in a StringList must be separated by a comma (,). You can't use other punctuation or special characters to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

Specifying a parameter type is not required when updating a parameter. You must specify a parameter type when creating a parameter.

" }, "KeyId":{ "shape":"ParameterKeyId", @@ -12689,7 +12766,7 @@ }, "value":{ "shape":"SessionFilterValue", - "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.

  • Target: Specify an instance to which session connections have been made.

  • Owner: Specify an AWS user account to see a list of sessions started by that user.

  • Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:

    • Connected

    • Connecting

    • Disconnected

    • Terminated

    • Terminating

    • Failed

" + "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.

  • Target: Specify an instance to which session connections have been made.

  • Owner: Specify an AWS user account to see a list of sessions started by that user.

  • Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:

    • Connected

    • Connecting

    • Disconnected

    • Terminated

    • Terminating

    • Failed

  • SessionId: Specify a session ID to return details about the session.
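A brief sketch of filtering by the new SessionId key once the Java client is regenerated from this model; the session ID is a placeholder:

import software.amazon.awssdk.services.ssm.SsmClient;
import software.amazon.awssdk.services.ssm.model.DescribeSessionsResponse;
import software.amazon.awssdk.services.ssm.model.SessionFilter;
import software.amazon.awssdk.services.ssm.model.SessionFilterKey;
import software.amazon.awssdk.services.ssm.model.SessionState;

public class DescribeSessionBySessionIdSketch {
    public static void main(String[] args) {
        try (SsmClient ssm = SsmClient.create()) {
            DescribeSessionsResponse response = ssm.describeSessions(r -> r
                    .state(SessionState.HISTORY)
                    .filters(SessionFilter.builder()
                            .key(SessionFilterKey.SESSION_ID)
                            .value("theuser-0123456789abcdef0")   // placeholder session ID
                            .build()));

            response.sessions().forEach(s -> System.out.println(s.sessionId() + " " + s.status()));
        }
    }
}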

" } }, "documentation":"

Describes a filter for Session Manager information.

" @@ -12701,13 +12778,14 @@ "InvokedBefore", "Target", "Owner", - "Status" + "Status", + "SessionId" ] }, "SessionFilterList":{ "type":"list", "member":{"shape":"SessionFilter"}, - "max":5, + "max":6, "min":1 }, "SessionFilterValue":{ diff --git a/services/sso/pom.xml b/services/sso/pom.xml index f28b341ce3d2..fccc71434f7f 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sso AWS Java SDK :: Services :: SSO @@ -56,5 +56,23 @@ aws-json-protocol ${awsjavasdk.version} + + software.amazon.awssdk + profiles + ${awsjavasdk.version} + compile + + + + + com.google.jimfs + jimfs + test + + + com.google.guava + guava + test + diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/ExpiredTokenException.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/ExpiredTokenException.java new file mode 100644 index 000000000000..9284bcace31e --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/ExpiredTokenException.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.exception.SdkClientException; + +/** + *

+ * The session token that was passed is expired or is not valid. + *

+ */ +@SdkPublicApi +public final class ExpiredTokenException extends SdkClientException { + + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private ExpiredTokenException(Builder b) { + super(b); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public interface Builder extends SdkPojo, SdkClientException.Builder { + @Override + Builder message(String message); + + @Override + Builder cause(Throwable cause); + + @Override + ExpiredTokenException build(); + } + + static final class BuilderImpl extends SdkClientException.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(ExpiredTokenException model) { + super(model); + } + + @Override + public BuilderImpl message(String message) { + this.message = message; + return this; + } + + @Override + public BuilderImpl cause(Throwable cause) { + this.cause = cause; + return this; + } + + @Override + public ExpiredTokenException build() { + return new ExpiredTokenException(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProvider.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProvider.java new file mode 100644 index 000000000000..3663771fe572 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProvider.java @@ -0,0 +1,256 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static software.amazon.awssdk.utils.Validate.notNull; + +import java.time.Duration; +import java.time.Instant; +import java.util.Optional; +import java.util.function.Consumer; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.services.sso.SsoClient; +import software.amazon.awssdk.services.sso.internal.SessionCredentialsHolder; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest; +import software.amazon.awssdk.services.sso.model.RoleCredentials; +import software.amazon.awssdk.utils.SdkAutoCloseable; +import software.amazon.awssdk.utils.cache.CachedSupplier; +import software.amazon.awssdk.utils.cache.NonBlocking; +import software.amazon.awssdk.utils.cache.RefreshResult; + +/** + *

+ * An implementation of {@link AwsCredentialsProvider} that is extended within this package to provide support for + * periodically updating session credentials. This credential provider maintains a {@link Supplier} + * for a {@link SsoClient#getRoleCredentials(Consumer)} call to retrieve the credentials needed. + *

+ * + *

+ * While creating the {@link GetRoleCredentialsRequest}, an access token needs to be resolved from a token file. + * By default, the token is assumed to be unexpired; if it has expired, an {@link ExpiredTokenException} is thrown. + * To change this behavior, implement your own token resolution logic and supply the request through + * {@link Builder#refreshRequest}. + *

+ * + *

+ * When credentials get close to expiration, this class will attempt to update them asynchronously. If the credentials + * end up expiring, this class will block all calls to {@link #resolveCredentials()} until the credentials can be updated. + *
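+ * A rough usage sketch (the account ID, role name, and access token handling here are illustrative; see + * {@link SsoProfileCredentialsProviderFactory} for how the SDK wires this up from a profile): + * <pre>{@code + * SsoCredentialsProvider provider = SsoCredentialsProvider.builder() + *     .ssoClient(ssoClient) + *     .refreshRequest(() -> GetRoleCredentialsRequest.builder() + *                                                    .accountId("111122223333") + *                                                    .roleName("MyRole") + *                                                    .accessToken(accessToken) // resolved by the caller + *                                                    .build()) + *     .build(); + * }</pre> + *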

+ */ +@SdkPublicApi +public final class SsoCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { + + private static final Duration DEFAULT_STALE_TIME = Duration.ofMinutes(1); + private static final Duration DEFAULT_PREFETCH_TIME = Duration.ofMinutes(5); + + private static final String ASYNC_THREAD_NAME = "sdk-sso-credentials-provider"; + + private final Supplier getRoleCredentialsRequestSupplier; + + private final SsoClient ssoClient; + private final Duration staleTime; + private final Duration prefetchTime; + + private final CachedSupplier credentialCache; + + /** + * @see #builder() + */ + private SsoCredentialsProvider(BuilderImpl builder) { + this.ssoClient = notNull(builder.ssoClient, "SSO client must not be null."); + this.getRoleCredentialsRequestSupplier = builder.getRoleCredentialsRequestSupplier; + + this.staleTime = Optional.ofNullable(builder.staleTime).orElse(DEFAULT_STALE_TIME); + this.prefetchTime = Optional.ofNullable(builder.prefetchTime).orElse(DEFAULT_PREFETCH_TIME); + + CachedSupplier.Builder cacheBuilder = CachedSupplier.builder(this::updateSsoCredentials); + if (builder.asyncCredentialUpdateEnabled) { + cacheBuilder.prefetchStrategy(new NonBlocking(ASYNC_THREAD_NAME)); + } + + this.credentialCache = cacheBuilder.build(); + } + + /** + * Update the expiring session SSO credentials by calling SSO. Invoked by {@link CachedSupplier} when the credentials + * are close to expiring. + */ + private RefreshResult updateSsoCredentials() { + SessionCredentialsHolder credentials = getUpdatedCredentials(ssoClient); + Instant acutalTokenExpiration = credentials.sessionCredentialsExpiration(); + + return RefreshResult.builder(credentials) + .staleTime(acutalTokenExpiration.minus(staleTime)) + .prefetchTime(acutalTokenExpiration.minus(prefetchTime)) + .build(); + } + + private SessionCredentialsHolder getUpdatedCredentials(SsoClient ssoClient) { + GetRoleCredentialsRequest request = getRoleCredentialsRequestSupplier.get(); + notNull(request, "GetRoleCredentialsRequest can't be null."); + RoleCredentials roleCredentials = ssoClient.getRoleCredentials(request).roleCredentials(); + AwsSessionCredentials sessionCredentials = AwsSessionCredentials.create(roleCredentials.accessKeyId(), + roleCredentials.secretAccessKey(), + roleCredentials.sessionToken()); + return new SessionCredentialsHolder(sessionCredentials, Instant.ofEpochMilli(roleCredentials.expiration())); + } + + /** + * The amount of time, relative to session token expiration, that the cached credentials are considered stale and + * should no longer be used. All threads will block until the value is updated. + */ + public Duration staleTime() { + return staleTime; + } + + /** + * The amount of time, relative to session token expiration, that the cached credentials are considered close to stale + * and should be updated. + */ + public Duration prefetchTime() { + return prefetchTime; + } + + /** + * Get a builder for creating a custom {@link SsoCredentialsProvider}. + */ + public static BuilderImpl builder() { + return new BuilderImpl(); + } + + @Override + public AwsCredentials resolveCredentials() { + return credentialCache.get().sessionCredentials(); + } + + @Override + public void close() { + credentialCache.close(); + } + + /** + * A builder for creating a custom {@link SsoCredentialsProvider}. + */ + public interface Builder { + + /** + * Configure the {@link SsoClient} to use when calling SSO to update the session. This client should not be shut + * down as long as this credentials provider is in use. 
+ */ + Builder ssoClient(SsoClient ssoClient); + + /** + * Configure whether the provider should fetch credentials asynchronously in the background. If this is true, + * threads are less likely to block when credentials are loaded, but additional resources are used to maintain + * the provider. + * + *

By default, this is disabled.

+ */ + Builder asyncCredentialUpdateEnabled(Boolean asyncCredentialUpdateEnabled); + + /** + * Configure the amount of time, relative to SSO session token expiration, that the cached credentials are considered + * stale and should no longer be used. All threads will block until the value is updated. + * + *

By default, this is 1 minute.

+ */ + Builder staleTime(Duration staleTime); + + /** + * Configure the amount of time, relative to SSO session token expiration, that the cached credentials are considered + * close to stale and should be updated. See {@link #asyncCredentialUpdateEnabled}. + * + *

By default, this is 5 minutes.

+ */ + Builder prefetchTime(Duration prefetchTime); + + /** + * Configure the {@link GetRoleCredentialsRequest} that should be periodically sent to the SSO service to update the + * credentials. + */ + Builder refreshRequest(GetRoleCredentialsRequest getRoleCredentialsRequest); + + /** + * Similar to {@link #refreshRequest(GetRoleCredentialsRequest)}, but takes a {@link Supplier} to supply the request to + * SSO. + */ + Builder refreshRequest(Supplier getRoleCredentialsRequestSupplier); + + /** + * Create a {@link SsoCredentialsProvider} using the configuration applied to this builder. + * @return + */ + SsoCredentialsProvider build(); + + } + + protected static final class BuilderImpl implements Builder { + private Boolean asyncCredentialUpdateEnabled = false; + private SsoClient ssoClient; + private Duration staleTime; + private Duration prefetchTime; + private Supplier getRoleCredentialsRequestSupplier; + + BuilderImpl() { + + } + + @Override + public Builder ssoClient(SsoClient ssoClient) { + this.ssoClient = ssoClient; + return this; + } + + @Override + public Builder asyncCredentialUpdateEnabled(Boolean asyncCredentialUpdateEnabled) { + this.asyncCredentialUpdateEnabled = asyncCredentialUpdateEnabled; + return this; + } + + @Override + public Builder staleTime(Duration staleTime) { + this.staleTime = staleTime; + return this; + } + + @Override + public Builder prefetchTime(Duration prefetchTime) { + this.prefetchTime = prefetchTime; + return this; + } + + @Override + public Builder refreshRequest(GetRoleCredentialsRequest getRoleCredentialsRequest) { + return refreshRequest(() -> getRoleCredentialsRequest); + } + + @Override + public Builder refreshRequest(Supplier getRoleCredentialsRequestSupplier) { + this.getRoleCredentialsRequestSupplier = getRoleCredentialsRequestSupplier; + return this; + } + + @Override + public SsoCredentialsProvider build() { + return new SsoCredentialsProvider(this); + } + + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactory.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactory.java new file mode 100644 index 000000000000..c7745714f258 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactory.java @@ -0,0 +1,112 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.sso.auth; + +import static software.amazon.awssdk.services.sso.internal.SsoTokenFileUtils.generateCachedTokenPath; +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; + +import java.nio.file.Paths; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProviderFactory; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.sso.SsoClient; +import software.amazon.awssdk.services.sso.internal.SsoAccessTokenProvider; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest; +import software.amazon.awssdk.utils.IoUtils; +import software.amazon.awssdk.utils.SdkAutoCloseable; + +/** + * An implementation of {@link ProfileCredentialsProviderFactory} that allows users to get SSO role credentials using the startUrl + * specified in either a {@link Profile} or environment variables. + */ +@SdkProtectedApi +public class SsoProfileCredentialsProviderFactory implements ProfileCredentialsProviderFactory { + + private static final String TOKEN_DIRECTORY = Paths.get(userHomeDirectory(), ".aws", "sso", "cache").toString(); + + /** + * Default method to create the {@link SsoProfileCredentialsProvider} with a {@link SsoAccessTokenProvider} + * object created with the start url from {@link Profile} or environment variables and the default token file directory. + */ + public AwsCredentialsProvider create(Profile profile) { + return create(profile, new SsoAccessTokenProvider( + generateCachedTokenPath(profile.properties().get(ProfileProperty.SSO_START_URL), TOKEN_DIRECTORY))); + } + + /** + * Alternative method to create the {@link SsoProfileCredentialsProvider} with a customized + * {@link SsoAccessTokenProvider}. This method is only used for testing. + */ + @SdkTestInternalApi + public AwsCredentialsProvider create(Profile profile, + SsoAccessTokenProvider tokenProvider) { + return new SsoProfileCredentialsProvider(profile, tokenProvider); + } + + /** + * A wrapper for a {@link SsoCredentialsProvider} that is returned by this factory when {@link #create(Profile)} or + * {@link #create(Profile, SsoAccessTokenProvider)} is invoked. This wrapper is important because it ensures the parent + * credentials provider is closed when the sso credentials provider is no longer needed. 
+ */ + private static final class SsoProfileCredentialsProvider implements AwsCredentialsProvider, SdkAutoCloseable { + private final SsoClient ssoClient; + private final SsoCredentialsProvider credentialsProvider; + + private SsoProfileCredentialsProvider(Profile profile, + SsoAccessTokenProvider tokenProvider) { + String ssoAccountId = profile.properties().get(ProfileProperty.SSO_ACCOUNT_ID); + String ssoRoleName = profile.properties().get(ProfileProperty.SSO_ROLE_NAME); + String ssoRegion = profile.properties().get(ProfileProperty.SSO_REGION); + + this.ssoClient = SsoClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.of(ssoRegion)) + .build(); + + GetRoleCredentialsRequest request = GetRoleCredentialsRequest.builder() + .accountId(ssoAccountId) + .roleName(ssoRoleName) + .build(); + + Supplier supplier = () -> request.toBuilder() + .accessToken(tokenProvider.resolveAccessToken()).build(); + + + this.credentialsProvider = SsoCredentialsProvider.builder() + .ssoClient(ssoClient) + .refreshRequest(supplier) + .build(); + } + + @Override + public AwsCredentials resolveCredentials() { + return this.credentialsProvider.resolveCredentials(); + } + + @Override + public void close() { + IoUtils.closeQuietly(credentialsProvider, null); + IoUtils.closeQuietly(ssoClient, null); + } + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SessionCredentialsHolder.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SessionCredentialsHolder.java new file mode 100644 index 000000000000..b114917a81bb --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SessionCredentialsHolder.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import java.time.Instant; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; + +/** + * Holder class used to atomically store a session with its expiration time. 
+ */ +@SdkInternalApi +@ThreadSafe +public final class SessionCredentialsHolder { + + private final AwsSessionCredentials sessionCredentials; + private final Instant sessionCredentialsExpiration; + + public SessionCredentialsHolder(AwsSessionCredentials credentials, Instant expiration) { + this.sessionCredentials = credentials; + this.sessionCredentialsExpiration = expiration; + } + + public AwsSessionCredentials sessionCredentials() { + return sessionCredentials; + } + + public Instant sessionCredentialsExpiration() { + return sessionCredentialsExpiration; + } +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java new file mode 100644 index 000000000000..242208af9356 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProvider.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import static java.time.temporal.ChronoUnit.MINUTES; + +import com.fasterxml.jackson.databind.JsonNode; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Instant; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.util.json.JacksonUtils; +import software.amazon.awssdk.services.sso.auth.ExpiredTokenException; +import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider; +import software.amazon.awssdk.utils.IoUtils; + +/** + * Resolve the access token from the cached token file. If the token has expired then throw out an exception to ask the users to + * update the token. This provider can also be replaced by any other implementation of resolving the access token. The users can + * resolve the access token in their own way and add it to the {@link SsoCredentialsProvider.Builder#refreshRequest}. + */ +@SdkInternalApi +public final class SsoAccessTokenProvider { + + private Path cachedTokenFilePath; + + public SsoAccessTokenProvider(Path cachedTokenFilePath) { + this.cachedTokenFilePath = cachedTokenFilePath; + } + + public String resolveAccessToken() { + try (InputStream cachedTokenStream = Files.newInputStream(cachedTokenFilePath)) { + return getTokenFromJson(IoUtils.toUtf8String(cachedTokenStream)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private String getTokenFromJson(String json) { + JsonNode jsonNode = JacksonUtils.sensitiveJsonNodeOf(json); + + if (validateToken(jsonNode.get("expiresAt").asText())) { + throw ExpiredTokenException.builder().message("The SSO session associated with this profile has expired or is" + + " otherwise invalid. 
To refresh this SSO session run aws sso" + + " login with the corresponding profile.").build(); + } + + return jsonNode.get("accessToken").asText(); + } + + private boolean validateToken(String expirationTime) { + return Instant.now().isAfter(Instant.parse(expirationTime).minus(15, MINUTES)); + } + +} diff --git a/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtils.java b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtils.java new file mode 100644 index 000000000000..7ddd353bb7b2 --- /dev/null +++ b/services/sso/src/main/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtils.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; + +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystems; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.regex.Pattern; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.Validate; + +/** + * A tool class helps generating the path of cached token file. + */ +@SdkInternalApi +public class SsoTokenFileUtils { + + private static final Pattern HOME_DIRECTORY_PATTERN = + Pattern.compile("^~(/|" + Pattern.quote(FileSystems.getDefault().getSeparator()) + ").*$"); + + private SsoTokenFileUtils() { + + } + + /** + * Generate the cached file name by generating the SHA1 Hex Digest of the UTF-8 encoded start url bytes. + */ + public static Path generateCachedTokenPath(String startUrl, String tokenDirectory) { + Validate.notNull(startUrl, "The start url shouldn't be null."); + byte[] startUrlBytes = startUrl.getBytes(StandardCharsets.UTF_8); + String encodedUrl = new String(startUrlBytes, StandardCharsets.UTF_8); + return resolveProfileFilePath(Paths.get(tokenDirectory, sha1Hex(encodedUrl) + ".json").toString()); + } + + /** + * Use {@link MessageDigest} instance to encrypt the input String. + */ + private static String sha1Hex(String input) { + MessageDigest md; + try { + md = MessageDigest.getInstance("SHA-1"); + md.update(input.getBytes(StandardCharsets.UTF_8)); + } catch (NoSuchAlgorithmException e) { + throw SdkClientException.builder().message("Unable to use \"SHA-1\" algorithm.").cause(e).build(); + } + + return BinaryUtils.toHex(md.digest()); + } + + private static Path resolveProfileFilePath(String path) { + // Resolve ~ using the CLI's logic, not whatever Java decides to do with it. 
+ if (HOME_DIRECTORY_PATTERN.matcher(path).matches()) { + path = userHomeDirectory() + path.substring(1); + } + + return Paths.get(path); + } +} diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProviderTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProviderTest.java new file mode 100644 index 000000000000..9d15a6cfc7be --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoCredentialsProviderTest.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.time.Duration; +import java.time.Instant; +import java.util.function.Supplier; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.services.sso.SsoClient; +import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest; +import software.amazon.awssdk.services.sso.model.GetRoleCredentialsResponse; +import software.amazon.awssdk.services.sso.model.RoleCredentials; + +/** + * Validates the functionality of {@link SsoCredentialsProvider}. 
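Before the test cases, a compact sketch of how the provider under test is normally assembled; the region, account id, role name, and token are placeholder assumptions, and resolving credentials requires a valid SSO session at runtime.

import java.time.Duration;
import java.util.function.Supplier;
import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sso.SsoClient;
import software.amazon.awssdk.services.sso.auth.SsoCredentialsProvider;
import software.amazon.awssdk.services.sso.model.GetRoleCredentialsRequest;

public final class SsoCredentialsProviderSketch {
    public static void main(String[] args) {
        SsoClient ssoClient = SsoClient.builder()
                                       .credentialsProvider(AnonymousCredentialsProvider.create())
                                       .region(Region.US_EAST_1)
                                       .build();

        // The refresh request re-reads the (placeholder) access token on each refresh.
        Supplier<GetRoleCredentialsRequest> refreshRequest =
            () -> GetRoleCredentialsRequest.builder()
                                           .accountId("012345678901")
                                           .roleName("SampleRole")
                                           .accessToken("cached-access-token")
                                           .build();

        try (SsoCredentialsProvider provider = SsoCredentialsProvider.builder()
                                                                     .ssoClient(ssoClient)
                                                                     .refreshRequest(refreshRequest)
                                                                     .staleTime(Duration.ofMinutes(2))
                                                                     .prefetchTime(Duration.ofMinutes(4))
                                                                     .build()) {
            AwsCredentials credentials = provider.resolveCredentials();
            System.out.println(credentials.accessKeyId());
        }

        ssoClient.close();
    }
}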
+ */ +public class SsoCredentialsProviderTest { + + private SsoClient ssoClient; + + @Test + public void cachingDoesNotApplyToExpiredSession() { + callClientWithCredentialsProvider(Instant.now().minus(Duration.ofSeconds(5)), 2, false); + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + @Test + public void cachingDoesNotApplyToExpiredSession_OverridePrefetchAndStaleTimes() { + callClientWithCredentialsProvider(Instant.now().minus(Duration.ofSeconds(5)), 2, true); + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + @Test + public void cachingAppliesToNonExpiredSession() { + callClientWithCredentialsProvider(Instant.now().plus(Duration.ofHours(5)), 2, false); + callClient(verify(ssoClient, times(1)), Mockito.any()); + } + + @Test + public void cachingAppliesToNonExpiredSession_OverridePrefetchAndStaleTimes() { + callClientWithCredentialsProvider(Instant.now().plus(Duration.ofHours(5)), 2, true); + callClient(verify(ssoClient, times(1)), Mockito.any()); + } + + @Test + public void distantExpiringCredentialsUpdatedInBackground() throws InterruptedException { + callClientWithCredentialsProvider(Instant.now().plusSeconds(90), 2, false); + + Instant endCheckTime = Instant.now().plus(Duration.ofSeconds(5)); + while (Mockito.mockingDetails(ssoClient).getInvocations().size() < 2 && endCheckTime.isAfter(Instant.now())) { + Thread.sleep(100); + } + + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + @Test + public void distantExpiringCredentialsUpdatedInBackground_OverridePrefetchAndStaleTimes() throws InterruptedException { + callClientWithCredentialsProvider(Instant.now().plusSeconds(90), 2, true); + + Instant endCheckTime = Instant.now().plus(Duration.ofSeconds(5)); + while (Mockito.mockingDetails(ssoClient).getInvocations().size() < 2 && endCheckTime.isAfter(Instant.now())) { + Thread.sleep(100); + } + + callClient(verify(ssoClient, times(2)), Mockito.any()); + } + + + + private GetRoleCredentialsRequestSupplier getRequestSupplier() { + return new GetRoleCredentialsRequestSupplier(GetRoleCredentialsRequest.builder().build(), "cachedToken"); + } + + private GetRoleCredentialsResponse getResponse(RoleCredentials roleCredentials) { + return GetRoleCredentialsResponse.builder().roleCredentials(roleCredentials).build(); + } + + private GetRoleCredentialsResponse callClient(SsoClient ssoClient, GetRoleCredentialsRequest request) { + return ssoClient.getRoleCredentials(request); + } + + private void callClientWithCredentialsProvider(Instant credentialsExpirationDate, int numTimesInvokeCredentialsProvider, + boolean overrideStaleAndPrefetchTimes) { + ssoClient = mock(SsoClient.class); + RoleCredentials credentials = RoleCredentials.builder().accessKeyId("a").secretAccessKey("b").sessionToken("c") + .expiration(credentialsExpirationDate.toEpochMilli()).build(); + + Supplier supplier = getRequestSupplier(); + GetRoleCredentialsResponse response = getResponse(credentials); + + when(ssoClient.getRoleCredentials(supplier.get())).thenReturn(response); + + SsoCredentialsProvider.Builder ssoCredentialsProviderBuilder = SsoCredentialsProvider.builder().refreshRequest(supplier); + + if(overrideStaleAndPrefetchTimes) { + ssoCredentialsProviderBuilder.staleTime(Duration.ofMinutes(2)); + ssoCredentialsProviderBuilder.prefetchTime(Duration.ofMinutes(4)); + } + + try (SsoCredentialsProvider credentialsProvider = ssoCredentialsProviderBuilder.ssoClient(ssoClient).build()) { + if(overrideStaleAndPrefetchTimes) { + assertThat(credentialsProvider.staleTime()).as("stale 
time").isEqualTo(Duration.ofMinutes(2)); + assertThat(credentialsProvider.prefetchTime()).as("prefetch time").isEqualTo(Duration.ofMinutes(4)); + } else { + assertThat(credentialsProvider.staleTime()).as("stale time").isEqualTo(Duration.ofMinutes(1)); + assertThat(credentialsProvider.prefetchTime()).as("prefetch time").isEqualTo(Duration.ofMinutes(5)); + } + + for (int i = 0; i < numTimesInvokeCredentialsProvider; ++i) { + AwsSessionCredentials actualCredentials = (AwsSessionCredentials) credentialsProvider.resolveCredentials(); + assertThat(actualCredentials.accessKeyId()).isEqualTo("a"); + assertThat(actualCredentials.secretAccessKey()).isEqualTo("b"); + assertThat(actualCredentials.sessionToken()).isEqualTo("c"); + } + } + + } + + private static final class GetRoleCredentialsRequestSupplier implements Supplier { + private final GetRoleCredentialsRequest request; + private final String cachedToken; + + GetRoleCredentialsRequestSupplier(GetRoleCredentialsRequest request, + String cachedToken) { + this.request = request; + this.cachedToken = cachedToken; + } + + @Override + public Object get() { + return request.toBuilder().accessToken(cachedToken).build(); + } + + } + +} \ No newline at end of file diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactoryTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactoryTest.java new file mode 100644 index 000000000000..bcc47d576ccd --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileCredentialsProviderFactoryTest.java @@ -0,0 +1,75 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableList; +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.services.sso.internal.SsoAccessTokenProvider; + +/** + * Validate the code path of creating the {@link SsoCredentialsProvider} with {@link SsoProfileCredentialsProviderFactory}. 
+ */ +public class SsoProfileCredentialsProviderFactoryTest { + + @Test + public void createSsoCredentialsProviderWithFactorySucceed() throws IOException { + String startUrl = "https//d-abc123.awsapps.com/start"; + String generatedTokenFileName = "6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json"; + + Map properties = new HashMap<>(); + properties.put("sso_account_id", "accountId"); + properties.put("sso_region", "region"); + properties.put("sso_role_name", "roleName"); + properties.put("sso_start_url", "https//d-abc123.awsapps.com/start"); + Profile profile = Profile.builder().name("foo").properties(properties).build(); + + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ startUrl +"\"\n" + + "}"; + SsoAccessTokenProvider tokenProvider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, generatedTokenFileName)); + + SsoProfileCredentialsProviderFactory factory = new SsoProfileCredentialsProviderFactory(); + assertThat(factory.create(profile, tokenProvider)).isInstanceOf(AwsCredentialsProvider.class); + } + + private Path prepareTestCachedTokenFile(String tokenFileContent, String generatedTokenFileName) throws IOException { + FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); + Path fileDirectory = fs.getPath("./foo"); + + Files.createDirectory(fileDirectory); + Path cachedTokenFilePath = fileDirectory.resolve(generatedTokenFileName); + Files.write(cachedTokenFilePath, ImmutableList.of(tokenFileContent), StandardCharsets.UTF_8); + + return cachedTokenFilePath; + } +} \ No newline at end of file diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java new file mode 100644 index 000000000000..ee08c541eaef --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/auth/SsoProfileTest.java @@ -0,0 +1,94 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.auth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.internal.ProfileCredentialsUtils; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.utils.StringInputStream; + +/** + * Validate the completeness of sso profile properties consumed by the {@link ProfileCredentialsUtils}. 
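For contrast with the missing-property cases asserted below, a sketch of a fully specified SSO profile; the profile name and values are illustrative.

import software.amazon.awssdk.profiles.ProfileFile;
import software.amazon.awssdk.utils.StringInputStream;

public final class CompleteSsoProfileSketch {
    public static void main(String[] args) {
        // All four sso_* properties that SSO credential resolution requires.
        String profileContent = "[profile foo]\n"
                                + "sso_account_id=012345678901\n"
                                + "sso_region=us-east-1\n"
                                + "sso_role_name=SampleRole\n"
                                + "sso_start_url=https://d-abc123.awsapps.com/start-beta\n";

        ProfileFile profiles = ProfileFile.builder()
                                          .content(new StringInputStream(profileContent))
                                          .type(ProfileFile.Type.CONFIGURATION)
                                          .build();

        // With every property present, none of the "Profile property ... was not
        // configured" errors asserted below apply.
        System.out.println(profiles.profile("foo").isPresent());
    }
}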
+ */ +public class SsoProfileTest { + + @Test + public void createSsoCredentialsProvider_SsoAccountIdMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_region=us-east-1\n" + + "sso_role_name=SampleRole\n" + + "sso_start_url=https://d-abc123.awsapps.com/start-beta\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_account_id' was not configured"); + }); + } + + @Test + public void createSsoCredentialsProvider_SsoRegionMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_account_id=012345678901\n" + + "sso_role_name=SampleRole\n" + + "sso_start_url=https://d-abc123.awsapps.com/start-beta\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_region' was not configured"); + }); + } + + @Test + public void createSsoCredentialsProvider_SsoRoleNameMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_account_id=012345678901\n" + + "sso_region=us-east-1\n" + + "sso_start_url=https://d-abc123.awsapps.com/start-beta\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_role_name' was not configured"); + }); + } + + @Test + public void createSsoCredentialsProvider_SsoStartUrlMissing_throwException() { + String profileContent = "[profile foo]\n" + + "sso_account_id=012345678901\n" + + "sso_region=us-east-1\n" + + "sso_role_name=SampleRole\n"; + ProfileFile profiles = ProfileFile.builder() + .content(new StringInputStream(profileContent)) + .type(ProfileFile.Type.CONFIGURATION) + .build(); + assertThat(profiles.profile("foo")).hasValueSatisfying(profile -> { + assertThatThrownBy(() -> new ProfileCredentialsUtils(profile, profiles::profile).credentialsProvider()) + .hasMessageContaining("Profile property 'sso_start_url' was not configured"); + }); + } +} diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProviderTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProviderTest.java new file mode 100644 index 000000000000..678bd8f129bb --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoAccessTokenProviderTest.java @@ -0,0 +1,145 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sso.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.google.common.collect.ImmutableList; +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import org.junit.Test; + +/** + * Check if the behavior of {@link SsoAccessTokenProvider} is correct while consuming different formats of cached token + * file. + */ +public class SsoAccessTokenProviderTest { + + private static final String START_URL = "https//d-abc123.awsapps.com/start"; + private static final String GENERATED_TOKEN_FILE_NAME = "6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json"; + private static final String WRONG_TOKEN_FILE_NAME = "wrong-token-file.json"; + + @Test + public void cachedTokenFile_correctFormat_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThat(provider.resolveAccessToken()).isEqualTo("base64string"); + } + + @Test + public void cachedTokenFile_accessTokenMissing_throwNullPointerException() throws IOException { + String tokenFile = "{\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).isInstanceOf(NullPointerException.class); + } + + @Test + public void cachedTokenFile_expiresAtMissing_throwNullPointerException() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"region\": \"us-west-2\", \n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).isInstanceOf(NullPointerException.class); + } + + @Test + public void cachedTokenFile_optionalRegionMissing_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"startUrl\": \""+ START_URL +"\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThat(provider.resolveAccessToken()).isEqualTo("base64string"); + } + + @Test + public void cachedTokenFile_optionalStartUrlMissing_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2090-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\"\n" + + "}"; + SsoAccessTokenProvider provider = 
new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThat(provider.resolveAccessToken()).isEqualTo("base64string"); + } + + @Test + public void cachedTokenFile_alreadyExpired_resolveAccessTokenCorrectly() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2019-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\"\n" + + "}"; + SsoAccessTokenProvider provider = new SsoAccessTokenProvider( + prepareTestCachedTokenFile(tokenFile, GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).hasMessageContaining("The SSO session associated with this profile " + + "has expired or is otherwise invalid."); + } + + @Test + public void cachedTokenFile_tokenFileNotExist_throwNullPointerException() throws IOException { + String tokenFile = "{\n" + + "\"accessToken\": \"base64string\",\n" + + "\"expiresAt\": \"2019-01-01T00:00:00Z\",\n" + + "\"region\": \"us-west-2\"\n" + + "}"; + prepareTestCachedTokenFile(tokenFile, WRONG_TOKEN_FILE_NAME); + SsoAccessTokenProvider provider = new SsoAccessTokenProvider(createTestCachedTokenFilePath( + Jimfs.newFileSystem(Configuration.unix()).getPath("./foo"), GENERATED_TOKEN_FILE_NAME)); + assertThatThrownBy(provider::resolveAccessToken).isInstanceOf(UncheckedIOException.class); + } + + private Path prepareTestCachedTokenFile(String tokenFileContent, String generatedTokenFileName) throws IOException { + FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); + Path fileDirectory = fs.getPath("./foo"); + + Files.createDirectory(fileDirectory); + Path cachedTokenFilePath = createTestCachedTokenFilePath(fileDirectory, generatedTokenFileName); + Files.write(cachedTokenFilePath, ImmutableList.of(tokenFileContent), StandardCharsets.UTF_8); + + return cachedTokenFilePath; + } + + private Path createTestCachedTokenFilePath(Path directory, String tokenFileName) { + return directory.resolve(tokenFileName); + } + +} diff --git a/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtilsTest.java b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtilsTest.java new file mode 100644 index 000000000000..1028fa397759 --- /dev/null +++ b/services/sso/src/test/java/software/amazon/awssdk/services/sso/internal/SsoTokenFileUtilsTest.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
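The test that follows pins the expected cache file name; here is a standalone sketch of the same derivation, the SHA-1 hex digest of the UTF-8 start URL bytes with a .json suffix.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import software.amazon.awssdk.utils.BinaryUtils;

public final class CachedTokenFileNameSketch {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        String startUrl = "https//d-abc123.awsapps.com/start";

        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        String hex = BinaryUtils.toHex(sha1.digest(startUrl.getBytes(StandardCharsets.UTF_8)));

        // Prints 6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json, the file name the
        // test below expects under ~/.aws/sso/cache.
        System.out.println(hex + ".json");
    }
}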
+ */ + +package software.amazon.awssdk.services.sso.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.sso.internal.SsoTokenFileUtils.generateCachedTokenPath; +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; + +import org.junit.Test; + +public class SsoTokenFileUtilsTest { + + @Test + public void generateTheCorrectPathTest() { + String startUrl = "https//d-abc123.awsapps.com/start"; + String directory = "~/.aws/sso/cache"; + assertThat(generateCachedTokenPath(startUrl, directory).toString()) + .isEqualTo(userHomeDirectory() + "/.aws/sso/cache/6a888bdb653a4ba345dd68f21b896ec2e218c6f4.json"); + } + +} \ No newline at end of file diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index d6614b9993e8..9c4d7db95f0e 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssoadmin/src/main/resources/codegen-resources/service-2.json b/services/ssoadmin/src/main/resources/codegen-resources/service-2.json index 9f6ebc321edd..e886ccab6c71 100644 --- a/services/ssoadmin/src/main/resources/codegen-resources/service-2.json +++ b/services/ssoadmin/src/main/resources/codegen-resources/service-2.json @@ -52,6 +52,24 @@ ], "documentation":"

Assigns access to a principal for a specified AWS account using a specified permission set.

The term principal here refers to a user or group that is defined in AWS SSO.

As part of a successful CreateAccountAssignment call, the specified permission set will automatically be provisioned to the account in the form of an IAM policy attached to the SSO-created IAM role. If the permission set is subsequently updated, the corresponding IAM policies attached to roles in your accounts will not be updated automatically. In this case, you will need to call ProvisionPermissionSet to make these updates.

" }, + "CreateInstanceAccessControlAttributeConfiguration":{ + "name":"CreateInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"CreateInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Enables the attribute-based access control (ABAC) feature for the specified AWS SSO instance. You can also specify new attributes to add to your ABAC configuration during the enabling process. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.
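Since the request shapes are fully modeled in this file, a hedged sketch of enabling ABAC through the generated ssoadmin client; the instance ARN and attribute values are placeholders, and the client and builder method names assume the SDK's usual codegen of the members defined below.

import software.amazon.awssdk.services.ssoadmin.SsoAdminClient;
import software.amazon.awssdk.services.ssoadmin.model.AccessControlAttribute;
import software.amazon.awssdk.services.ssoadmin.model.AccessControlAttributeValue;
import software.amazon.awssdk.services.ssoadmin.model.InstanceAccessControlAttributeConfiguration;

public final class EnableAbacSketch {
    public static void main(String[] args) {
        try (SsoAdminClient ssoAdmin = SsoAdminClient.create()) {
            // Map a CostCenter attribute from the identity source (placeholder source path).
            AccessControlAttribute costCenter =
                AccessControlAttribute.builder()
                                      .key("CostCenter")
                                      .value(AccessControlAttributeValue.builder()
                                                                        .source("${path:enterprise.costCenter}")
                                                                        .build())
                                      .build();

            ssoAdmin.createInstanceAccessControlAttributeConfiguration(r -> r
                .instanceArn("arn:aws:sso:::instance/ssoins-EXAMPLE1111111111")
                .instanceAccessControlAttributeConfiguration(
                    InstanceAccessControlAttributeConfiguration.builder()
                                                               .accessControlAttributes(costCenter)
                                                               .build()));
        }
    }
}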

" + }, "CreatePermissionSet":{ "name":"CreatePermissionSet", "http":{ @@ -107,6 +125,24 @@ ], "documentation":"

Deletes the inline policy from a specified permission set.

" }, + "DeleteInstanceAccessControlAttributeConfiguration":{ + "name":"DeleteInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"DeleteInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Disables the attribute-based access control (ABAC) feature for the specified AWS SSO instance and deletes all of the attribute mappings that have been configured. Once deleted, any attributes that are received from an identity source and any custom attributes you have previously configured will not be passed. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

" + }, "DeletePermissionSet":{ "name":"DeletePermissionSet", "http":{ @@ -159,6 +195,23 @@ ], "documentation":"

Describes the status of the assignment deletion request.

" }, + "DescribeInstanceAccessControlAttributeConfiguration":{ + "name":"DescribeInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"DescribeInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the list of AWS SSO identity store attributes that have been configured to work with attribute-based access control (ABAC) for the specified AWS SSO instance. This will not return attributes configured and sent by an external identity provider. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

" + }, "DescribePermissionSet":{ "name":"DescribePermissionSet", "http":{ @@ -471,6 +524,24 @@ ], "documentation":"

Disassociates a set of tags from a specified resource.

" }, + "UpdateInstanceAccessControlAttributeConfiguration":{ + "name":"UpdateInstanceAccessControlAttributeConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateInstanceAccessControlAttributeConfigurationRequest"}, + "output":{"shape":"UpdateInstanceAccessControlAttributeConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates the AWS SSO identity store attributes to use with the AWS SSO instance for attribute-based access control (ABAC). When using an external identity provider as an identity source, you can pass attributes through the SAML assertion as an alternative to configuring attributes from the AWS SSO identity store. If a SAML assertion passes any of these attributes, AWS SSO will replace the attribute value with the value from the AWS SSO identity store. For more information about ABAC, see Attribute-Based Access Control in the AWS SSO User Guide.

" + }, "UpdatePermissionSet":{ "name":"UpdatePermissionSet", "http":{ @@ -491,6 +562,59 @@ } }, "shapes":{ + "AccessControlAttribute":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"AccessControlAttributeKey", + "documentation":"

The name of the attribute associated with your identities in your identity source. This is used to map a specified attribute in your identity source to an attribute in AWS SSO.

" + }, + "Value":{ + "shape":"AccessControlAttributeValue", + "documentation":"

The value used for mapping a specified attribute to an identity source.

" + } + }, + "documentation":"

These are AWS SSO identity store attributes that you can configure for use in attribute-based access control (ABAC). You can create permission policies that determine who can access your AWS resources based on the configured attribute values. When you enable ABAC and specify AccessControlAttributes, AWS SSO passes the attribute values of the authenticated user into IAM for use in policy evaluation.

" + }, + "AccessControlAttributeKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]+" + }, + "AccessControlAttributeList":{ + "type":"list", + "member":{"shape":"AccessControlAttribute"}, + "max":50, + "min":0 + }, + "AccessControlAttributeValue":{ + "type":"structure", + "required":["Source"], + "members":{ + "Source":{ + "shape":"AccessControlAttributeValueSourceList", + "documentation":"

The identity source to use when mapping a specified attribute to AWS SSO.

" + } + }, + "documentation":"

The value used for mapping a specified attribute to an identity source.

" + }, + "AccessControlAttributeValueSource":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@\\[\\]\\{\\}\\$\\\\\"]*" + }, + "AccessControlAttributeValueSourceList":{ + "type":"list", + "member":{"shape":"AccessControlAttributeValueSource"}, + "max":1, + "min":1 + }, "AccessDeniedException":{ "type":"structure", "members":{ @@ -698,6 +822,28 @@ } } }, + "CreateInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "InstanceAccessControlAttributeConfiguration" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

The ARN of the SSO instance under which the operation will be executed.

" + }, + "InstanceAccessControlAttributeConfiguration":{ + "shape":"InstanceAccessControlAttributeConfiguration", + "documentation":"

Specifies the AWS SSO identity store attributes to add to your ABAC configuration. When using an external identity provider as an identity source, you can pass attributes through the SAML assertion as an alternative to configuring attributes from the AWS SSO identity store. If a SAML assertion passes any of these attributes, AWS SSO will replace the attribute value with the value from the AWS SSO identity store.

" + } + } + }, + "CreateInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "CreatePermissionSetRequest":{ "type":"structure", "required":[ @@ -809,6 +955,21 @@ "members":{ } }, + "DeleteInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

The ARN of the SSO instance under which the operation will be executed.

" + } + } + }, + "DeleteInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "DeletePermissionSetRequest":{ "type":"structure", "required":[ @@ -883,6 +1044,33 @@ } } }, + "DescribeInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":["InstanceArn"], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

The ARN of the SSO instance under which the operation will be executed.

" + } + } + }, + "DescribeInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"InstanceAccessControlAttributeConfigurationStatus", + "documentation":"

The status of the attribute configuration process.

" + }, + "StatusReason":{ + "shape":"InstanceAccessControlAttributeConfigurationStatusReason", + "documentation":"

Provides more details about the current status of the specified attribute.

" + }, + "InstanceAccessControlAttributeConfiguration":{ + "shape":"InstanceAccessControlAttributeConfiguration", + "documentation":"

Gets the list of AWS SSO identity store attributes added to your ABAC configuration.

" + } + } + }, "DescribePermissionSetProvisioningStatusRequest":{ "type":"structure", "required":[ @@ -1006,6 +1194,26 @@ "min":1, "pattern":"^[a-zA-Z0-9-]*" }, + "InstanceAccessControlAttributeConfiguration":{ + "type":"structure", + "required":["AccessControlAttributes"], + "members":{ + "AccessControlAttributes":{ + "shape":"AccessControlAttributeList", + "documentation":"

Lists the attributes that are configured for ABAC in the specified AWS SSO instance.

" + } + }, + "documentation":"

Specifies the attributes to add to your attribute-based access control (ABAC) configuration.

" + }, + "InstanceAccessControlAttributeConfigurationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "CREATION_IN_PROGRESS", + "CREATION_FAILED" + ] + }, + "InstanceAccessControlAttributeConfigurationStatusReason":{"type":"string"}, "InstanceArn":{ "type":"string", "max":1224, @@ -1779,6 +1987,28 @@ "members":{ } }, + "UpdateInstanceAccessControlAttributeConfigurationRequest":{ + "type":"structure", + "required":[ + "InstanceArn", + "InstanceAccessControlAttributeConfiguration" + ], + "members":{ + "InstanceArn":{ + "shape":"InstanceArn", + "documentation":"

The ARN of the SSO instance under which the operation will be executed.

" + }, + "InstanceAccessControlAttributeConfiguration":{ + "shape":"InstanceAccessControlAttributeConfiguration", + "documentation":"

Updates the attributes for your ABAC configuration.

" + } + } + }, + "UpdateInstanceAccessControlAttributeConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdatePermissionSetRequest":{ "type":"structure", "required":[ diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 348ed6558034..9bf42f4c6cf1 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/ssooidc/src/main/resources/codegen-resources/customization.config b/services/ssooidc/src/main/resources/codegen-resources/customization.config new file mode 100644 index 000000000000..919e7a0f70fc --- /dev/null +++ b/services/ssooidc/src/main/resources/codegen-resources/customization.config @@ -0,0 +1,125 @@ +{ + "shapeModifiers": { + // Do not keep adding to this list. Make the service team do the right thing across all SDKs. + "AccessDeniedException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "AuthorizationPendingException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "ExpiredTokenException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InternalServerException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidClientException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidClientMetadataException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidGrantException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidRequestException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "InvalidScopeException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "SlowDownException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "UnauthorizedClientException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + }, + "UnsupportedGrantTypeException": { + "modify": [ + { + "error_description": { + "emitPropertyName": "errorDescription", + "existingNameDeprecated": true + } + } + ] + } + } +} \ No newline at end of file diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index a3e40ae52cf1..96e04060b6eb 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/storagegateway/src/main/resources/codegen-resources/paginators-1.json b/services/storagegateway/src/main/resources/codegen-resources/paginators-1.json index 78ba8622c246..921dfb4f541d 100644 --- 
a/services/storagegateway/src/main/resources/codegen-resources/paginators-1.json +++ b/services/storagegateway/src/main/resources/codegen-resources/paginators-1.json @@ -57,6 +57,12 @@ "output_token": "Marker", "result_key": "Tags" }, + "ListTapePools": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "PoolInfos" + }, "ListTapes": { "input_token": "Marker", "limit_key": "Limit", diff --git a/services/storagegateway/src/main/resources/codegen-resources/service-2.json b/services/storagegateway/src/main/resources/codegen-resources/service-2.json index 8468f4f927dd..1b32f80415cb 100644 --- a/services/storagegateway/src/main/resources/codegen-resources/service-2.json +++ b/services/storagegateway/src/main/resources/codegen-resources/service-2.json @@ -434,6 +434,20 @@ ], "documentation":"

Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume, and tape gateway types.

This operation returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

" }, + "DescribeBandwidthRateLimitSchedule":{ + "name":"DescribeBandwidthRateLimitSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBandwidthRateLimitScheduleInput"}, + "output":{"shape":"DescribeBandwidthRateLimitScheduleOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns information about the bandwidth rate limit schedule of a gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. This operation is supported only in the volume and tape gateway types.

This operation returns information about a gateway's bandwidth rate limit schedule. A bandwidth rate limit schedule consists of one or more bandwidth rate limit intervals. A bandwidth rate limit interval defines a period of time on one or more days of the week, during which bandwidth rate limits are specified for uploading, downloading, or both.

A bandwidth rate limit interval consists of one or more days of the week, a start hour and minute, an ending hour and minute, and bandwidth rate limits for uploading and downloading.

If no bandwidth rate limit schedule intervals are set for the gateway, this operation returns an empty response. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.
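A hedged sketch of calling the new operation with the generated storagegateway client; the gateway ARN is a placeholder and the method names assume the SDK's usual codegen of the input and output shapes added in this file.

import software.amazon.awssdk.services.storagegateway.StorageGatewayClient;
import software.amazon.awssdk.services.storagegateway.model.DescribeBandwidthRateLimitScheduleResponse;

public final class DescribeBandwidthScheduleSketch {
    public static void main(String[] args) {
        try (StorageGatewayClient storageGateway = StorageGatewayClient.create()) {
            DescribeBandwidthRateLimitScheduleResponse response =
                storageGateway.describeBandwidthRateLimitSchedule(r -> r
                    .gatewayARN("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"));

            // Empty when no bandwidth rate limit schedule has been configured for the gateway.
            response.bandwidthRateLimitIntervals()
                    .forEach(interval -> System.out.println(interval.daysOfWeek()));
        }
    }
}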

" + }, "DescribeCache":{ "name":"DescribeCache", "http":{ @@ -1022,6 +1036,20 @@ ], "documentation":"

Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume, and tape gateway types.

By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

" }, + "UpdateBandwidthRateLimitSchedule":{ + "name":"UpdateBandwidthRateLimitSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBandwidthRateLimitScheduleInput"}, + "output":{"shape":"UpdateBandwidthRateLimitScheduleOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This operation is supported in the volume and tape gateway types.

" + }, "UpdateChapCredentials":{ "name":"UpdateChapCredentials", "http":{ @@ -1090,7 +1118,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a Network File System (NFS) file share. This operation is only supported in the file gateway type.

To leave a file share field unchanged, set the corresponding input field to null.

Updates the following file share setting:

  • Default storage class for your S3 bucket

  • Metadata defaults for your S3 bucket

  • Allowed NFS clients for your file share

  • Squash settings

  • Write status of your file share

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported in file gateways.

" + "documentation":"

Updates a Network File System (NFS) file share. This operation is only supported in the file gateway type.

To leave a file share field unchanged, set the corresponding input field to null.

Updates the following file share settings:

  • Default storage class for your S3 bucket

  • Metadata defaults for your S3 bucket

  • Allowed NFS clients for your file share

  • Squash settings

  • Write status of your file share

" }, "UpdateSMBFileShare":{ "name":"UpdateSMBFileShare", @@ -1104,7 +1132,21 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a Server Message Block (SMB) file share.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" + "documentation":"

Updates a Server Message Block (SMB) file share. This operation is only supported for file gateways.

To leave a file share field unchanged, set the corresponding input field to null.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" + }, + "UpdateSMBFileShareVisibility":{ + "name":"UpdateSMBFileShareVisibility", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateSMBFileShareVisibilityInput"}, + "output":{"shape":"UpdateSMBFileShareVisibilityOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Controls whether the shares on a gateway are visible in a net view or browse list.

" }, "UpdateSMBSecurityStrategy":{ "name":"UpdateSMBSecurityStrategy", @@ -1458,6 +1500,53 @@ "type":"long", "min":102400 }, + "BandwidthRateLimitInterval":{ + "type":"structure", + "required":[ + "StartHourOfDay", + "StartMinuteOfHour", + "EndHourOfDay", + "EndMinuteOfHour", + "DaysOfWeek" + ], + "members":{ + "StartHourOfDay":{ + "shape":"HourOfDay", + "documentation":"

The hour of the day to start the bandwidth rate limit interval.

" + }, + "StartMinuteOfHour":{ + "shape":"MinuteOfHour", + "documentation":"

The minute of the hour to start the bandwidth rate limit interval. The interval begins at the start of that minute. To begin an interval exactly at the start of the hour, use the value 0.

" + }, + "EndHourOfDay":{ + "shape":"HourOfDay", + "documentation":"

The hour of the day to end the bandwidth rate limit interval.

" + }, + "EndMinuteOfHour":{ + "shape":"MinuteOfHour", + "documentation":"

The minute of the hour to end the bandwidth rate limit interval.

The bandwidth rate limit interval ends at the end of the minute. To end an interval at the end of an hour, use the value 59.

" + }, + "DaysOfWeek":{ + "shape":"DaysOfWeek", + "documentation":"

The days of the week component of the bandwidth rate limit interval, represented as ordinal numbers from 0 to 6, where 0 represents Sunday and 6 represents Saturday.

" + }, + "AverageUploadRateLimitInBitsPerSec":{ + "shape":"BandwidthUploadRateLimit", + "documentation":"

The average upload rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the upload rate limit is not set.

" + }, + "AverageDownloadRateLimitInBitsPerSec":{ + "shape":"BandwidthDownloadRateLimit", + "documentation":"

The average download rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the download rate limit is not set.

" + } + }, + "documentation":"

Describes a bandwidth rate limit interval for a gateway. A bandwidth rate limit schedule consists of one or more bandwidth rate limit intervals. A bandwidth rate limit interval defines a period of time on one or more days of the week, during which bandwidth rate limits are specified for uploading, downloading, or both.
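To make the interval shape concrete, a hedged sketch that builds one weekday interval and applies it with the new UpdateBandwidthRateLimitSchedule operation; the gateway ARN and limits are placeholders, and the method names assume the SDK's usual codegen of the members listed above.

import software.amazon.awssdk.services.storagegateway.StorageGatewayClient;
import software.amazon.awssdk.services.storagegateway.model.BandwidthRateLimitInterval;

public final class UpdateBandwidthScheduleSketch {
    public static void main(String[] args) {
        // Cap average upload bandwidth at 100 Mbit/s on weekdays from 09:00 through the end of 17:59.
        BandwidthRateLimitInterval businessHours =
            BandwidthRateLimitInterval.builder()
                                      .daysOfWeek(1, 2, 3, 4, 5) // 0 is Sunday, 6 is Saturday
                                      .startHourOfDay(9)
                                      .startMinuteOfHour(0)
                                      .endHourOfDay(17)
                                      .endMinuteOfHour(59)
                                      .averageUploadRateLimitInBitsPerSec(100_000_000L)
                                      .build();

        try (StorageGatewayClient storageGateway = StorageGatewayClient.create()) {
            storageGateway.updateBandwidthRateLimitSchedule(r -> r
                .gatewayARN("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B")
                .bandwidthRateLimitIntervals(businessHours));
        }
    }
}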

" + }, + "BandwidthRateLimitIntervals":{ + "type":"list", + "member":{"shape":"BandwidthRateLimitInterval"}, + "max":20, + "min":0 + }, "BandwidthType":{ "type":"string", "max":25, @@ -1774,6 +1863,10 @@ "CacheAttributes":{ "shape":"CacheAttributes", "documentation":"

Refresh cache information.

" + }, + "NotificationPolicy":{ + "shape":"NotificationPolicy", + "documentation":"

The notification policy of the file share.

" } }, "documentation":"

CreateNFSFileShareInput

" @@ -1845,6 +1938,10 @@ "shape":"Boolean", "documentation":"

Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

Valid Values: true | false

" }, + "AccessBasedEnumeration":{ + "shape":"Boolean", + "documentation":"

The files and folders on this share will only be visible to users with read access.

" + }, "AdminUserList":{ "shape":"FileShareUserList", "documentation":"

A list of users or groups in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

" @@ -1880,6 +1977,10 @@ "CacheAttributes":{ "shape":"CacheAttributes", "documentation":"

Refresh cache information.

" + }, + "NotificationPolicy":{ + "shape":"NotificationPolicy", + "documentation":"

The notification policy of the file share.

" } }, "documentation":"

CreateSMBFileShareInput

" @@ -2197,6 +2298,12 @@ "max":6, "min":0 }, + "DaysOfWeek":{ + "type":"list", + "member":{"shape":"DayOfWeek"}, + "max":7, + "min":1 + }, "DeleteAutomaticTapeCreationPolicyInput":{ "type":"structure", "required":["GatewayARN"], @@ -2469,6 +2576,23 @@ }, "documentation":"

A JSON object containing the following fields:

" }, + "DescribeBandwidthRateLimitScheduleInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeBandwidthRateLimitScheduleOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "BandwidthRateLimitIntervals":{ + "shape":"BandwidthRateLimitIntervals", + "documentation":"

An array that contains the bandwidth rate limit intervals for a tape or volume gateway.

" + } + } + }, "DescribeCacheInput":{ "type":"structure", "required":["GatewayARN"], @@ -2733,6 +2857,10 @@ "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", "documentation":"

The type of security strategy that was specified for file gateway.

  • ClientSpecified: If you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

  • MandatorySigning: If you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

  • MandatoryEncryption: If you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" + }, + "FileSharesVisible":{ + "shape":"Boolean", + "documentation":"

The shares on this gateway appear when listing shares.

" } } }, @@ -3871,6 +3999,10 @@ "CacheAttributes":{ "shape":"CacheAttributes", "documentation":"

Refresh cache information.

" + }, + "NotificationPolicy":{ + "shape":"NotificationPolicy", + "documentation":"

The notification policy of the file share.

" } }, "documentation":"

The Unix file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported in file gateways.

" @@ -3912,6 +4044,12 @@ "max":2048, "min":1 }, + "NotificationPolicy":{ + "type":"string", + "max":100, + "min":2, + "pattern":"^\\{[\\w\\s:\\{\\}\\[\\]\"]*}$" + }, "NotifyWhenUploadedInput":{ "type":"structure", "required":["FileShareARN"], @@ -4224,6 +4362,10 @@ "shape":"Boolean", "documentation":"

If this value is set to true, it indicates that access control list (ACL) is enabled on the SMB file share. If it is set to false, it indicates that file and directory permissions are mapped to the POSIX permission.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

" }, + "AccessBasedEnumeration":{ + "shape":"Boolean", + "documentation":"

Indicates whether AccessBasedEnumeration is enabled.

" + }, "AdminUserList":{ "shape":"FileShareUserList", "documentation":"

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" @@ -4256,6 +4398,10 @@ "CacheAttributes":{ "shape":"CacheAttributes", "documentation":"

Refresh cache information.

" + }, + "NotificationPolicy":{ + "shape":"NotificationPolicy", + "documentation":"

The notification policy of the file share.

" } }, "documentation":"

The Windows file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported for file gateways.

" @@ -4812,6 +4958,26 @@ }, "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose throttle information was updated.

" }, + "UpdateBandwidthRateLimitScheduleInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "BandwidthRateLimitIntervals" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "BandwidthRateLimitIntervals":{ + "shape":"BandwidthRateLimitIntervals", + "documentation":"

An array containing bandwidth rate limit schedule intervals for a gateway. When no bandwidth rate limit intervals have been scheduled, the array is empty.

" + } + } + }, + "UpdateBandwidthRateLimitScheduleOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "UpdateChapCredentialsInput":{ "type":"structure", "required":[ @@ -4985,6 +5151,10 @@ "CacheAttributes":{ "shape":"CacheAttributes", "documentation":"

Refresh cache information.

" + }, + "NotificationPolicy":{ + "shape":"NotificationPolicy", + "documentation":"

The notification policy of the file share.

" } }, "documentation":"

UpdateNFSFileShareInput

" @@ -5039,6 +5209,10 @@ "shape":"Boolean", "documentation":"

Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

Valid Values: true | false

" }, + "AccessBasedEnumeration":{ + "shape":"Boolean", + "documentation":"

The files and folders on this share will only be visible to users with read access.

" + }, "AdminUserList":{ "shape":"FileShareUserList", "documentation":"

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" @@ -5066,6 +5240,10 @@ "CacheAttributes":{ "shape":"CacheAttributes", "documentation":"

Refresh cache information.

" + }, + "NotificationPolicy":{ + "shape":"NotificationPolicy", + "documentation":"

The notification policy of the file share.

" } }, "documentation":"

UpdateSMBFileShareInput

" @@ -5080,6 +5258,26 @@ }, "documentation":"

UpdateSMBFileShareOutput

" }, + "UpdateSMBFileShareVisibilityInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "FileSharesVisible" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "FileSharesVisible":{ + "shape":"Boolean", + "documentation":"

The shares on this gateway appear when listing shares.
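As a rough sketch, toggling this flag could look like the following; the request class and client method names are assumed from the UpdateSMBFileShareVisibility operation (the released SDK's codegen casing may differ), and the gateway ARN is a placeholder.

import software.amazon.awssdk.services.storagegateway.StorageGatewayClient;
import software.amazon.awssdk.services.storagegateway.model.UpdateSMBFileShareVisibilityRequest;

public class ShareVisibilitySketch {
    public static void main(String[] args) {
        try (StorageGatewayClient gateway = StorageGatewayClient.create()) {
            // false hides the gateway's shares from share enumeration; DescribeSMBSettings
            // reports the current value in its FileSharesVisible field.
            gateway.updateSMBFileShareVisibility(UpdateSMBFileShareVisibilityRequest.builder()
                    .gatewayARN("arn:aws:storagegateway:us-west-2:123456789012:gateway/sgw-EXAMPLE")
                    .fileSharesVisible(false)
                    .build());
        }
    }
}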

" + } + } + }, + "UpdateSMBFileShareVisibilityOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "UpdateSMBSecurityStrategyInput":{ "type":"structure", "required":[ diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 638c5dbd7467..309e58dc0488 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/StsWebIdentityCredentialsProviderFactoryTest.java b/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/StsWebIdentityCredentialsProviderFactoryTest.java new file mode 100644 index 000000000000..f55ad2f6b2f3 --- /dev/null +++ b/services/sts/src/test/java/software/amazon/awssdk/services/sts/internal/StsWebIdentityCredentialsProviderFactoryTest.java @@ -0,0 +1,32 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sts.internal; + +import org.junit.Assert; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.WebIdentityTokenCredentialsProviderFactory; +import software.amazon.awssdk.auth.credentials.internal.WebIdentityCredentialsUtils; + +public class StsWebIdentityCredentialsProviderFactoryTest { + + + @Test + public void stsWebIdentityCredentialsProviderFactory_with_webIdentityCredentialsUtils() { + WebIdentityTokenCredentialsProviderFactory factory = WebIdentityCredentialsUtils.factory(); + Assert.assertNotNull(factory); + } + +} diff --git a/services/support/pom.xml b/services/support/pom.xml index 3a59ed65059d..e0e3a699d788 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 29b9b3e3b895..d3441c06fad7 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index 8bedefb0314c..695cca255f03 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/synthetics/src/main/resources/codegen-resources/service-2.json b/services/synthetics/src/main/resources/codegen-resources/service-2.json index 985c52799ab4..255bc9bba01d 100644 --- a/services/synthetics/src/main/resources/codegen-resources/service-2.json +++ b/services/synthetics/src/main/resources/codegen-resources/service-2.json @@ -269,7 +269,7 @@ }, "RuntimeVersion":{ "shape":"String", - "documentation":"

Specifies the runtime version to use for the canary. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

" + "documentation":"

Specifies the runtime version to use for the canary. For more information about runtime versions, see Canary Runtime Versions.

" }, "VpcConfig":{"shape":"VpcConfigOutput"}, "Tags":{ @@ -386,6 +386,10 @@ "ActiveTracing":{ "shape":"NullableBoolean", "documentation":"

Specifies whether this canary is to use active AWS X-Ray tracing when it runs. Active tracing enables this canary run to be displayed in the ServiceLens and X-Ray service maps even if the canary does not hit an endpoint that has X-ray tracing enabled. Using X-Ray tracing incurs charges. For more information, see Canaries and X-Ray tracing.

You can enable active tracing only for canaries that use version syn-nodejs-2.0 or later for their canary runtime.

" + }, + "EnvironmentVariables":{ + "shape":"EnvironmentVariablesMap", + "documentation":"

Specifies the keys and values to use for any environment variables used in the canary script. Use the following format:

{ \"key1\" : \"value1\", \"key2\" : \"value2\", ...}

Keys must start with a letter and be at least two characters. The total size of your environment variables cannot exceed 4 KB. You can't specify any Lambda reserved environment variables as the keys for your environment variables. For more information about reserved keys, see Runtime environment variables.
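A short sketch of passing such variables when updating a canary with the Java SDK; the canary name, keys, and values are placeholders, and the class names follow this model's shapes.

import java.util.HashMap;
import java.util.Map;
import software.amazon.awssdk.services.synthetics.SyntheticsClient;
import software.amazon.awssdk.services.synthetics.model.CanaryRunConfigInput;
import software.amazon.awssdk.services.synthetics.model.UpdateCanaryRequest;

public class CanaryEnvVarsSketch {
    public static void main(String[] args) {
        // Keys must start with a letter, be at least two characters long, avoid
        // Lambda reserved names, and stay under 4 KB in total.
        Map<String, String> env = new HashMap<>();
        env.put("TARGET_URL", "https://example.com/checkout");
        env.put("STAGE", "prod");

        try (SyntheticsClient synthetics = SyntheticsClient.create()) {
            synthetics.updateCanary(UpdateCanaryRequest.builder()
                    .name("checkout-canary")                      // placeholder canary name
                    .runConfig(CanaryRunConfigInput.builder()
                            .environmentVariables(env)
                            .build())
                    .build());
        }
    }
}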

" } }, "documentation":"

A structure that contains input information for a canary run.

" @@ -600,7 +604,7 @@ }, "RuntimeVersion":{ "shape":"String", - "documentation":"

Specifies the runtime version to use for the canary. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

" + "documentation":"

Specifies the runtime version to use for the canary. For a list of valid runtime versions and more information about runtime versions, see Canary Runtime Versions.

" }, "VpcConfig":{ "shape":"VpcConfigInput", @@ -716,6 +720,16 @@ } } }, + "EnvironmentVariableName":{ + "type":"string", + "pattern":"[a-zA-Z]([a-zA-Z0-9_])+" + }, + "EnvironmentVariableValue":{"type":"string"}, + "EnvironmentVariablesMap":{ + "type":"map", + "key":{"shape":"EnvironmentVariableName"}, + "value":{"shape":"EnvironmentVariableValue"} + }, "ErrorMessage":{"type":"string"}, "FunctionArn":{ "type":"string", @@ -858,7 +872,7 @@ "members":{ "VersionName":{ "shape":"String", - "documentation":"

The name of the runtime version. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0.

" + "documentation":"

The name of the runtime version. For a list of valid runtime versions, see Canary Runtime Versions.

" }, "Description":{ "shape":"String", @@ -1035,7 +1049,7 @@ }, "RuntimeVersion":{ "shape":"String", - "documentation":"

Specifies the runtime version to use for the canary. Currently, the only valid values are syn-nodejs-2.0, syn-nodejs-2.0-beta, and syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

" + "documentation":"

Specifies the runtime version to use for the canary. For a list of valid runtime versions and for more information about runtime versions, see Canary Runtime Versions.

" }, "Schedule":{ "shape":"CanaryScheduleInput", diff --git a/services/textract/pom.xml b/services/textract/pom.xml index c1cb81e5a2bf..a1097e5d29d3 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/textract/src/main/resources/codegen-resources/service-2.json b/services/textract/src/main/resources/codegen-resources/service-2.json index 787addcff8ff..9b070d759948 100644 --- a/services/textract/src/main/resources/codegen-resources/service-2.json +++ b/services/textract/src/main/resources/codegen-resources/service-2.json @@ -104,6 +104,7 @@ "errors":[ {"shape":"InvalidParameterException"}, {"shape":"InvalidS3ObjectException"}, + {"shape":"InvalidKMSKeyException"}, {"shape":"UnsupportedDocumentException"}, {"shape":"DocumentTooLargeException"}, {"shape":"BadDocumentException"}, @@ -127,6 +128,7 @@ "errors":[ {"shape":"InvalidParameterException"}, {"shape":"InvalidS3ObjectException"}, + {"shape":"InvalidKMSKeyException"}, {"shape":"UnsupportedDocumentException"}, {"shape":"DocumentTooLargeException"}, {"shape":"BadDocumentException"}, @@ -212,6 +214,10 @@ "shape":"String", "documentation":"

The word or line of text that's recognized by Amazon Textract.

" }, + "TextType":{ + "shape":"TextType", + "documentation":"

The kind of text that Amazon Textract has detected. Can be either handwritten or printed text.
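For example, a caller could pull out handwritten words with something like the following sketch, which uses the synchronous DetectDocumentText API; the bucket and object names are placeholders.

import software.amazon.awssdk.services.textract.TextractClient;
import software.amazon.awssdk.services.textract.model.Block;
import software.amazon.awssdk.services.textract.model.BlockType;
import software.amazon.awssdk.services.textract.model.DetectDocumentTextRequest;
import software.amazon.awssdk.services.textract.model.Document;
import software.amazon.awssdk.services.textract.model.S3Object;
import software.amazon.awssdk.services.textract.model.TextType;

public class TextTypeSketch {
    public static void main(String[] args) {
        try (TextractClient textract = TextractClient.create()) {
            DetectDocumentTextRequest request = DetectDocumentTextRequest.builder()
                    .document(Document.builder()
                            .s3Object(S3Object.builder().bucket("my-bucket").name("form.png").build())
                            .build())
                    .build();

            for (Block block : textract.detectDocumentText(request).blocks()) {
                // TextType is only meaningful for WORD blocks.
                if (block.blockType() == BlockType.WORD && block.textType() == TextType.HANDWRITING) {
                    System.out.println("Handwritten: " + block.text());
                }
            }
        }
    }
}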

" + }, "RowIndex":{ "shape":"UInteger", "documentation":"

The row in which a table cell is located. The first row position is 1. RowIndex isn't returned by DetectDocumentText and GetDocumentTextDetection.

" @@ -376,7 +382,7 @@ "type":"structure", "members":{ }, - "documentation":"

The document can't be processed because it's too large. The maximum document size for synchronous operations 5 MB. The maximum document size for asynchronous operations is 500 MB for PDF files.

", + "documentation":"

The document can't be processed because it's too large. The maximum document size for synchronous operations is 10 MB. The maximum document size for asynchronous operations is 500 MB for PDF files.

", "exception":true }, "EntityType":{ @@ -644,6 +650,13 @@ "documentation":"

An invalid job identifier was passed to GetDocumentAnalysis or to GetDocumentAnalysis.

", "exception":true }, + "InvalidKMSKeyException":{ + "type":"structure", + "members":{ + }, + "documentation":"

Indicates that you do not have decrypt permissions for the KMS key that was entered, or that the KMS key was entered incorrectly.

", + "exception":true + }, "InvalidParameterException":{ "type":"structure", "members":{ @@ -679,6 +692,12 @@ "min":1, "pattern":"[a-zA-Z0-9_.\\-:]+" }, + "KMSKeyId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$" + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -878,6 +897,10 @@ "OutputConfig":{ "shape":"OutputConfig", "documentation":"

Sets if the output will go to a customer defined bucket. By default, Amazon Textract will save the results internally to be accessed by the GetDocumentAnalysis operation.

" + }, + "KMSKeyId":{ + "shape":"KMSKeyId", + "documentation":"

The KMS key used to encrypt the inference results. This can be in either Key ID or Key Alias format. When a KMS key is provided, the KMS key will be used for server-side encryption of the objects in the customer bucket. When this parameter is not provided, the results are encrypted server side, using SSE-S3.
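A sketch of an asynchronous text-detection job that writes its output to a customer bucket under a customer managed key; the bucket names and key alias are placeholders, and the OutputConfig member names follow this service model.

import software.amazon.awssdk.services.textract.TextractClient;
import software.amazon.awssdk.services.textract.model.DocumentLocation;
import software.amazon.awssdk.services.textract.model.OutputConfig;
import software.amazon.awssdk.services.textract.model.S3Object;
import software.amazon.awssdk.services.textract.model.StartDocumentTextDetectionRequest;

public class EncryptedOutputSketch {
    public static void main(String[] args) {
        try (TextractClient textract = TextractClient.create()) {
            String jobId = textract.startDocumentTextDetection(StartDocumentTextDetectionRequest.builder()
                    .documentLocation(DocumentLocation.builder()
                            .s3Object(S3Object.builder().bucket("my-input-bucket").name("scan.pdf").build())
                            .build())
                    .outputConfig(OutputConfig.builder()
                            .s3Bucket("my-results-bucket")   // results land here instead of Textract-managed storage
                            .s3Prefix("textract/output")
                            .build())
                    .kmsKeyId("alias/textract-results")      // Key ID or alias; omit to fall back to SSE-S3
                    .build())
                    .jobId();
            System.out.println("Started job " + jobId);
        }
    }
}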

" } } }, @@ -913,6 +936,10 @@ "OutputConfig":{ "shape":"OutputConfig", "documentation":"

Sets if the output will go to a customer defined bucket. By default Amazon Textract will save the results internally to be accessed with the GetDocumentTextDetection operation.

" + }, + "KMSKeyId":{ + "shape":"KMSKeyId", + "documentation":"

The KMS key used to encrypt the inference results. This can be in either Key ID or Key Alias format. When a KMS key is provided, the KMS key will be used for server-side encryption of the objects in the customer bucket. When this parameter is not provided, the results are encrypted server side, using SSE-S3.

" } } }, @@ -927,6 +954,13 @@ }, "StatusMessage":{"type":"string"}, "String":{"type":"string"}, + "TextType":{ + "type":"string", + "enum":[ + "HANDWRITING", + "PRINTED" + ] + }, "ThrottlingException":{ "type":"structure", "members":{ diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index 6967a499a635..997b429c105f 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamquery/src/main/resources/codegen-resources/paginators-1.json b/services/timestreamquery/src/main/resources/codegen-resources/paginators-1.json index 1eb99abdb142..dcc93dac8de3 100644 --- a/services/timestreamquery/src/main/resources/codegen-resources/paginators-1.json +++ b/services/timestreamquery/src/main/resources/codegen-resources/paginators-1.json @@ -5,7 +5,8 @@ "limit_key": "MaxRows", "non_aggregate_keys": [ "ColumnInfo", - "QueryId" + "QueryId", + "QueryStatus" ], "output_token": "NextToken", "result_key": "Rows" diff --git a/services/timestreamquery/src/main/resources/codegen-resources/service-2.json b/services/timestreamquery/src/main/resources/codegen-resources/service-2.json index 4a68cd17b573..fd388619dca7 100644 --- a/services/timestreamquery/src/main/resources/codegen-resources/service-2.json +++ b/services/timestreamquery/src/main/resources/codegen-resources/service-2.json @@ -178,6 +178,7 @@ } } }, + "Double":{"type":"double"}, "Endpoint":{ "type":"structure", "required":[ @@ -288,9 +289,31 @@ "ColumnInfo":{ "shape":"ColumnInfoList", "documentation":"

The column data types of the returned result set.

" + }, + "QueryStatus":{ + "shape":"QueryStatus", + "documentation":"

Information about the status of the query, including progress and bytes scanned.

" } } }, + "QueryStatus":{ + "type":"structure", + "members":{ + "ProgressPercentage":{ + "shape":"Double", + "documentation":"

The progress of the query, expressed as a percentage.

" + }, + "CumulativeBytesScanned":{ + "shape":"Long", + "documentation":"

The amount of data scanned by the query in bytes. This is a cumulative sum and represents the total amount of bytes scanned since the query was started.

" + }, + "CumulativeBytesMetered":{ + "shape":"Long", + "documentation":"

The amount of data scanned by the query in bytes that you will be charged for. This is a cumulative sum and represents the total amount of data that you will be charged for since the query was started. The charge is applied only once and is either applied when the query completes execution or when the query is cancelled.

" + } + }, + "documentation":"

Information about the status of the query, including progress and bytes scanned.
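Since QueryStatus is now surfaced on every page (and carried through the paginator as a non-aggregate key), progress can be reported per page along the lines of this sketch; the database and table names are placeholders.

import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient;
import software.amazon.awssdk.services.timestreamquery.model.QueryRequest;
import software.amazon.awssdk.services.timestreamquery.model.QueryResponse;

public class QueryProgressSketch {
    public static void main(String[] args) {
        try (TimestreamQueryClient query = TimestreamQueryClient.create()) {
            QueryRequest request = QueryRequest.builder()
                    .queryString("SELECT COUNT(*) FROM \"exampleDb\".\"exampleTable\"")
                    .build();

            for (QueryResponse page : query.queryPaginator(request)) {
                // QueryStatus values are cumulative, so the last page reports the final totals.
                System.out.printf("progress=%.1f%%, scanned=%d bytes, metered=%d bytes%n",
                        page.queryStatus().progressPercentage(),
                        page.queryStatus().cumulativeBytesScanned(),
                        page.queryStatus().cumulativeBytesMetered());
            }
        }
    }
}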

" + }, "QueryString":{ "type":"string", "sensitive":true diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 1465f0b948b5..1b429872811d 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json b/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json index 2c184eb3e562..9ced833c8668 100644 --- a/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json +++ b/services/timestreamwrite/src/main/resources/codegen-resources/service-2.json @@ -70,7 +70,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.

All tables in the database must be deleted first, or a ValidationException error will be thrown.

", + "documentation":"

Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.

All tables in the database must be deleted first, or a ValidationException error will be thrown.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

", "endpointdiscovery":{"required":true} }, "DeleteTable":{ @@ -88,7 +88,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.

", + "documentation":"

Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

", "endpointdiscovery":{"required":true} }, "DescribeDatabase":{ @@ -192,6 +192,7 @@ "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"InvalidEndpointException"} ], @@ -209,6 +210,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"InvalidEndpointException"} ], @@ -226,6 +228,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidEndpointException"} ], @@ -507,7 +510,7 @@ "members":{ "Name":{ "shape":"StringValue256", - "documentation":"

Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions. Dimension names can only contain alphanumeric characters and underscores. Dimension names cannot end with an underscore.

" + "documentation":"

Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions.

For constraints on Dimension names, see Naming Constraints.

" }, "Value":{ "shape":"StringValue2048", @@ -692,16 +695,22 @@ }, "Time":{ "shape":"StringValue256", - "documentation":"

Contains the time at which the measure value for the data point was collected.

" + "documentation":"

Contains the time at which the measure value for the data point was collected. The time value plus the unit provides the time elapsed since the epoch. For example, if the time value is 12345 and the unit is ms, then 12345 ms have elapsed since the epoch.

" }, "TimeUnit":{ "shape":"TimeUnit", "documentation":"

The granularity of the timestamp unit. It indicates if the time value is in seconds, milliseconds, nanoseconds or other supported values.

" + }, + "Version":{ + "shape":"RecordVersion", + "documentation":"

64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated. The default value is 1.
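A sketch of a versioned write, including reading ExistingVersion back when a record is rejected because an equal or higher version is already stored. The database, table, and dimension values are placeholders, and the exception and record accessor names are assumed from this service model.

import java.time.Instant;
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.Dimension;
import software.amazon.awssdk.services.timestreamwrite.model.MeasureValueType;
import software.amazon.awssdk.services.timestreamwrite.model.Record;
import software.amazon.awssdk.services.timestreamwrite.model.RejectedRecordsException;
import software.amazon.awssdk.services.timestreamwrite.model.TimeUnit;
import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsRequest;

public class VersionedWriteSketch {
    public static void main(String[] args) {
        Record cpu = Record.builder()
                .dimensions(Dimension.builder().name("host").value("host-1").build())
                .measureName("cpu_utilization")
                .measureValue("73.5")
                .measureValueType(MeasureValueType.DOUBLE)
                .time(String.valueOf(Instant.now().toEpochMilli()))
                .timeUnit(TimeUnit.MILLISECONDS)
                .version(2L)   // updates the stored measure value only if 2 is higher than the stored version
                .build();

        try (TimestreamWriteClient writer = TimestreamWriteClient.create()) {
            try {
                writer.writeRecords(WriteRecordsRequest.builder()
                        .databaseName("exampleDb").tableName("exampleTable").records(cpu).build());
            } catch (RejectedRecordsException e) {
                // ExistingVersion reports which version is already stored for each rejected record.
                e.rejectedRecords().forEach(r ->
                        System.out.println("record " + r.recordIndex() + ": " + r.reason()
                                + " (existing version " + r.existingVersion() + ")"));
            }
        }
    }
}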

", + "box":true } }, "documentation":"

Record represents a time series data point being written into Timestream. Each record contains an array of dimensions. Dimensions represent the meta data attributes of a time series data point, such as the instance name or availability zone of an EC2 instance. A record also contains the measure name, which is the name of the measure being collected (for example, the CPU utilization of an EC2 instance), along with the measure value and the value type, which is the data type of the measure value. In addition, the record contains the timestamp of when the measure was collected and the timestamp unit, which represents the granularity of the timestamp.

" }, "RecordIndex":{"type":"integer"}, + "RecordVersion":{"type":"long"}, "Records":{ "type":"list", "member":{"shape":"Record"}, @@ -717,7 +726,12 @@ }, "Reason":{ "shape":"ErrorMessage", - "documentation":"

The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:

  • Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.

  • Records with timestamps that lie outside the retention duration of the memory store

  • Records with dimensions or measures that exceed the Timestream defined limits.

For more information, see Access Management in the Timestream Developer Guide.

" + "documentation":"

The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:

  • Records with duplicate data where there are multiple records with the same dimensions, timestamps, and measure names but different measure values.

  • Records with timestamps that lie outside the retention duration of the memory store

    When the retention window is updated, you will receive a RejectedRecords exception if you immediately try to ingest data within the new window. To avoid a RejectedRecords exception, wait for the duration of the new window before ingesting that data. For more information, see Best Practices for Configuring Timestream and the explanation of how storage works in Timestream.

  • Records with dimensions or measures that exceed the Timestream defined limits.

For more information, see Access Management in the Timestream Developer Guide.

" + }, + "ExistingVersion":{ + "shape":"RecordVersion", + "documentation":"

The existing version of the record. This value is populated in scenarios where an identical record exists with a higher version than the version in the write request.

", + "box":true } }, "documentation":"

Records that were not successfully inserted into Timestream due to data validation issues that must be resolved prior to reinserting time series data into the system.

" diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 41fdcf834b80..a944c35a058d 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index 6172af598a28..3690f53f64ac 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json index f7d2a8ddbd52..11fc8d66fd90 100644 --- a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,23 @@ "uid":"transcribe-streaming-2017-10-26" }, "operations":{ + "StartMedicalStreamTranscription":{ + "name":"StartMedicalStreamTranscription", + "http":{ + "method":"POST", + "requestUri":"/medical-stream-transcription" + }, + "input":{"shape":"StartMedicalStreamTranscriptionRequest"}, + "output":{"shape":"StartMedicalStreamTranscriptionResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon Transcribe Medical and the transcription results are streamed to your application.
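A rough sketch of wiring this operation up with the SDK's async client follows. The response-handler builder methods are assumed to match the pattern the SDK already generates for StartStreamTranscription, and the placeholder audio publisher sends no audio; a real application would publish AudioEvent chunks of 16 kHz PCM.

import java.util.concurrent.atomic.AtomicBoolean;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscription;
import software.amazon.awssdk.services.transcribestreaming.TranscribeStreamingAsyncClient;
import software.amazon.awssdk.services.transcribestreaming.model.AudioStream;
import software.amazon.awssdk.services.transcribestreaming.model.LanguageCode;
import software.amazon.awssdk.services.transcribestreaming.model.MediaEncoding;
import software.amazon.awssdk.services.transcribestreaming.model.Specialty;
import software.amazon.awssdk.services.transcribestreaming.model.StartMedicalStreamTranscriptionRequest;
import software.amazon.awssdk.services.transcribestreaming.model.StartMedicalStreamTranscriptionResponseHandler;
import software.amazon.awssdk.services.transcribestreaming.model.Type;

public class MedicalStreamSketch {
    public static void main(String[] args) {
        StartMedicalStreamTranscriptionRequest request = StartMedicalStreamTranscriptionRequest.builder()
                .languageCode(LanguageCode.EN_US)        // medical streaming accepts US English only
                .mediaSampleRateHertz(16_000)
                .mediaEncoding(MediaEncoding.PCM)
                .specialty(Specialty.PRIMARYCARE)
                .type(Type.CONVERSATION)
                .build();

        // Placeholder publisher: completes without sending audio. A real application
        // would publish AudioEvent chunks of raw PCM here.
        AtomicBoolean done = new AtomicBoolean();
        Publisher<AudioStream> noAudio = subscriber -> subscriber.onSubscribe(new Subscription() {
            @Override public void request(long n) {
                if (done.compareAndSet(false, true)) {
                    subscriber.onComplete();
                }
            }
            @Override public void cancel() { }
        });

        // Prints each event (partial and final transcription results) as it arrives.
        StartMedicalStreamTranscriptionResponseHandler handler =
                StartMedicalStreamTranscriptionResponseHandler.builder()
                        .subscriber(event -> System.out.println(event))
                        .onError(Throwable::printStackTrace)
                        .build();

        TranscribeStreamingAsyncClient client = TranscribeStreamingAsyncClient.create();
        client.startMedicalStreamTranscription(request, noAudio, handler).join();
        client.close();
    }
}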

" + }, "StartStreamTranscription":{ "name":"StartStreamTranscription", "http":{ @@ -78,11 +95,12 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

One or more arguments to the StartStreamTranscription operation was invalid. For example, MediaEncoding was not set to pcm or LanguageCode was not set to a valid code. Check the parameters and try your request again.

", + "documentation":"

One or more arguments to the StartStreamTranscription or StartMedicalStreamTranscription operation was invalid. For example, MediaEncoding was not set to a valid encoding, or LanguageCode was not set to a valid code. Check the parameters and try your request again.

", "error":{"httpStatusCode":400}, "exception":true }, "Boolean":{"type":"boolean"}, + "Confidence":{"type":"double"}, "ConflictException":{ "type":"structure", "members":{ @@ -98,7 +116,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

A problem occurred while processing the audio. Amazon Transcribe terminated processing. Try your request again.

", + "documentation":"

A problem occurred while processing the audio. Amazon Transcribe or Amazon Transcribe Medical terminated processing. Try your request again.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true @@ -152,7 +170,12 @@ "es-US", "fr-CA", "fr-FR", - "en-AU" + "en-AU", + "it-IT", + "de-DE", + "pt-BR", + "ja-JP", + "ko-KR" ] }, "LimitExceededException":{ @@ -166,13 +189,140 @@ }, "MediaEncoding":{ "type":"string", - "enum":["pcm"] + "enum":[ + "pcm", + "ogg-opus", + "flac" + ] }, "MediaSampleRateHertz":{ "type":"integer", "max":48000, "min":8000 }, + "MedicalAlternative":{ + "type":"structure", + "members":{ + "Transcript":{ + "shape":"String", + "documentation":"

The text that was transcribed from the audio.

" + }, + "Items":{ + "shape":"MedicalItemList", + "documentation":"

A list of objects that contains words and punctuation marks that represents one or more interpretations of the input audio.

" + } + }, + "documentation":"

A list of possible transcriptions for the audio.

" + }, + "MedicalAlternativeList":{ + "type":"list", + "member":{"shape":"MedicalAlternative"} + }, + "MedicalItem":{ + "type":"structure", + "members":{ + "StartTime":{ + "shape":"Double", + "documentation":"

The number of seconds into an audio stream that indicates the creation time of an item.

" + }, + "EndTime":{ + "shape":"Double", + "documentation":"

The number of seconds into an audio stream that indicates the end time of an item.

" + }, + "Type":{ + "shape":"ItemType", + "documentation":"

The type of the item. PRONUNCIATION indicates that the item is a word that was recognized in the input audio. PUNCTUATION indicates that the item was interpreted as a pause in the input audio, such as a period to indicate the end of a sentence.

" + }, + "Content":{ + "shape":"String", + "documentation":"

The word or punctuation mark that was recognized in the input audio.

" + }, + "Confidence":{ + "shape":"Confidence", + "documentation":"

A value between 0 and 1 that represents the confidence score Amazon Transcribe Medical assigns to each word that it transcribes.

" + }, + "Speaker":{ + "shape":"String", + "documentation":"

If speaker identification is enabled, shows the integer values that correspond to the different speakers identified in the stream. For example, if the value of Speaker in the stream is either a 0 or a 1, that indicates that Amazon Transcribe Medical has identified two speakers in the stream. The value of 0 corresponds to one speaker and the value of 1 corresponds to the other speaker.

" + } + }, + "documentation":"

A word or punctuation that is transcribed from the input audio.

" + }, + "MedicalItemList":{ + "type":"list", + "member":{"shape":"MedicalItem"} + }, + "MedicalResult":{ + "type":"structure", + "members":{ + "ResultId":{ + "shape":"String", + "documentation":"

A unique identifier for the result.

" + }, + "StartTime":{ + "shape":"Double", + "documentation":"

The time, in seconds, from the beginning of the audio stream to the beginning of the result.

" + }, + "EndTime":{ + "shape":"Double", + "documentation":"

The time, in seconds, from the beginning of the audio stream to the end of the result.

" + }, + "IsPartial":{ + "shape":"Boolean", + "documentation":"

Amazon Transcribe Medical divides the incoming audio stream into segments at natural points in the audio. Transcription results are returned based on these segments.

The IsPartial field is true to indicate that Amazon Transcribe Medical has additional transcription data to send. The IsPartial field is false to indicate that this is the last transcription result for the segment.

" + }, + "Alternatives":{ + "shape":"MedicalAlternativeList", + "documentation":"

A list of possible transcriptions of the audio. Each alternative typically contains one Item that contains the result of the transcription.

" + }, + "ChannelId":{ + "shape":"String", + "documentation":"

When channel identification is enabled, Amazon Transcribe Medical transcribes the speech from each audio channel separately.

You can use ChannelId to retrieve the transcription results for a single channel in your audio stream.

" + } + }, + "documentation":"

The results of transcribing a portion of the input audio stream.

" + }, + "MedicalResultList":{ + "type":"list", + "member":{"shape":"MedicalResult"} + }, + "MedicalTranscript":{ + "type":"structure", + "members":{ + "Results":{ + "shape":"MedicalResultList", + "documentation":"

MedicalResult objects that contain the results of transcribing a portion of the input audio stream. The array can be empty.

" + } + }, + "documentation":"

The medical transcript in a MedicalTranscriptEvent.

" + }, + "MedicalTranscriptEvent":{ + "type":"structure", + "members":{ + "Transcript":{ + "shape":"MedicalTranscript", + "documentation":"

The transcription of the audio stream. The transcription is composed of all of the items in the results list.

" + } + }, + "documentation":"

Represents a set of transcription results from the server to the client. It contains one or more segments of the transcription.

", + "event":true + }, + "MedicalTranscriptResultStream":{ + "type":"structure", + "members":{ + "TranscriptEvent":{ + "shape":"MedicalTranscriptEvent", + "documentation":"

A portion of the transcription of the audio stream. Events are sent periodically from Amazon Transcribe Medical to your application. The event can be a partial transcription of a section of the audio stream, or it can be the entire transcription of that portion of the audio stream.

" + }, + "BadRequestException":{"shape":"BadRequestException"}, + "LimitExceededException":{"shape":"LimitExceededException"}, + "InternalFailureException":{"shape":"InternalFailureException"}, + "ConflictException":{"shape":"ConflictException"}, + "ServiceUnavailableException":{"shape":"ServiceUnavailableException"} + }, + "documentation":"

Represents the transcription result stream from Amazon Transcribe Medical to your application.

", + "eventstream":true + }, "NumberOfChannels":{ "type":"integer", "min":2 @@ -227,6 +377,168 @@ "min":36, "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" }, + "Specialty":{ + "type":"string", + "enum":[ + "PRIMARYCARE", + "CARDIOLOGY", + "NEUROLOGY", + "ONCOLOGY", + "RADIOLOGY", + "UROLOGY" + ] + }, + "StartMedicalStreamTranscriptionRequest":{ + "type":"structure", + "required":[ + "LanguageCode", + "MediaSampleRateHertz", + "MediaEncoding", + "Specialty", + "Type", + "AudioStream" + ], + "members":{ + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

Indicates the source language used in the input audio stream. For Amazon Transcribe Medical, this is US English (en-US).

", + "location":"header", + "locationName":"x-amzn-transcribe-language-code" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "documentation":"

The sample rate of the input audio in Hertz. Sample rates of 16000 Hz or higher are accepted.

", + "location":"header", + "locationName":"x-amzn-transcribe-sample-rate" + }, + "MediaEncoding":{ + "shape":"MediaEncoding", + "documentation":"

The encoding used for the input audio.

", + "location":"header", + "locationName":"x-amzn-transcribe-media-encoding" + }, + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the medical custom vocabulary to use when processing the real-time stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-name" + }, + "Specialty":{ + "shape":"Specialty", + "documentation":"

The medical specialty of the clinician or provider.

", + "location":"header", + "locationName":"x-amzn-transcribe-specialty" + }, + "Type":{ + "shape":"Type", + "documentation":"

The type of input audio. Choose DICTATION for a provider dictating patient notes. Choose CONVERSATION for a dialogue between a patient and one or more medical professionals.

", + "location":"header", + "locationName":"x-amzn-transcribe-type" + }, + "ShowSpeakerLabel":{ + "shape":"Boolean", + "documentation":"

When true, enables speaker identification in your real-time stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-show-speaker-label" + }, + "SessionId":{ + "shape":"SessionId", + "documentation":"

Optional. An identifier for the transcription session. If you don't provide a session ID, Amazon Transcribe generates one for you and returns it in the response.

", + "location":"header", + "locationName":"x-amzn-transcribe-session-id" + }, + "AudioStream":{"shape":"AudioStream"}, + "EnableChannelIdentification":{ + "shape":"Boolean", + "documentation":"

When true, instructs Amazon Transcribe Medical to process each audio channel separately and then merge the transcription output of each channel into a single transcription.

Amazon Transcribe Medical also produces a transcription of each item. An item includes the start time, end time, and any alternative transcriptions.

You can't set both ShowSpeakerLabel and EnableChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

", + "location":"header", + "locationName":"x-amzn-transcribe-enable-channel-identification" + }, + "NumberOfChannels":{ + "shape":"NumberOfChannels", + "documentation":"

The number of channels that are in your audio stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-number-of-channels" + } + }, + "payload":"AudioStream" + }, + "StartMedicalStreamTranscriptionResponse":{ + "type":"structure", + "members":{ + "RequestId":{ + "shape":"RequestId", + "documentation":"

An identifier for the streaming transcription.

", + "location":"header", + "locationName":"x-amzn-request-id" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code for the response transcript. For Amazon Transcribe Medical, this is US English (en-US).

", + "location":"header", + "locationName":"x-amzn-transcribe-language-code" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "documentation":"

The sample rate of the input audio in Hertz. Valid value: 16000 Hz.

", + "location":"header", + "locationName":"x-amzn-transcribe-sample-rate" + }, + "MediaEncoding":{ + "shape":"MediaEncoding", + "documentation":"

The encoding used for the input audio stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-media-encoding" + }, + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the vocabulary used when processing the stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-name" + }, + "Specialty":{ + "shape":"Specialty", + "documentation":"

The specialty in the medical domain.

", + "location":"header", + "locationName":"x-amzn-transcribe-specialty" + }, + "Type":{ + "shape":"Type", + "documentation":"

The type of audio that was transcribed.

", + "location":"header", + "locationName":"x-amzn-transcribe-type" + }, + "ShowSpeakerLabel":{ + "shape":"Boolean", + "documentation":"

Shows whether speaker identification was enabled in the stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-show-speaker-label" + }, + "SessionId":{ + "shape":"SessionId", + "documentation":"

Optional. An identifier for the transcription session. If you don't provide a session ID, Amazon Transcribe generates one for you and returns it in the response.

", + "location":"header", + "locationName":"x-amzn-transcribe-session-id" + }, + "TranscriptResultStream":{ + "shape":"MedicalTranscriptResultStream", + "documentation":"

Represents the stream of transcription events from Amazon Transcribe Medical to your application.

" + }, + "EnableChannelIdentification":{ + "shape":"Boolean", + "documentation":"

Shows whether channel identification has been enabled in the stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-enable-channel-identification" + }, + "NumberOfChannels":{ + "shape":"NumberOfChannels", + "documentation":"

The number of channels identified in the stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-number-of-channels" + } + }, + "payload":"TranscriptResultStream" + }, "StartStreamTranscriptionRequest":{ "type":"structure", "required":[ @@ -250,7 +562,7 @@ }, "MediaEncoding":{ "shape":"MediaEncoding", - "documentation":"

The encoding used for the input audio. pcm is the only valid value.

", + "documentation":"

The encoding used for the input audio.

", "location":"header", "locationName":"x-amzn-transcribe-media-encoding" }, @@ -432,6 +744,13 @@ "documentation":"

Represents the transcription result stream from Amazon Transcribe to your application.

", "eventstream":true }, + "Type":{ + "type":"string", + "enum":[ + "CONVERSATION", + "DICTATION" + ] + }, "VocabularyFilterMethod":{ "type":"string", "enum":[ diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index 710aece7c4c1..0800922211c4 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 908f6595f6c7..0e130ed87bf0 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -257,7 +257,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server, such as server and user properties, are not affected by stopping your server. Stopping the server will not reduce or impact your file transfer protocol endpoint billing.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

" + "documentation":"

Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server, such as server and user properties, are not affected by stopping your server.

Stopping the server will not reduce or impact your file transfer protocol endpoint billing; you must delete the server to stop being billed.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

" }, "TagResource":{ "name":"TagResource", @@ -386,11 +386,11 @@ }, "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. When you host your endpoint within your VPC, you can make it accessible only to resources within your VPC, or you can attach Elastic IPs and make it accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make it accessible only to resources within your VPC, or you can attach Elastic IPs and make it accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of VPC endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" + "documentation":"

The type of VPC endpoint that you want your server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" }, "HostKey":{ "shape":"HostKey", @@ -402,7 +402,7 @@ }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

Specifies the mode of authentication for a file transfer protocol-enabled server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the AWS Transfer Family service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

" + "documentation":"

Specifies the mode of authentication for a server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the AWS Transfer Family service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

" }, "LoggingRole":{ "shape":"Role", @@ -410,7 +410,7 @@ }, "Protocols":{ "shape":"Protocols", - "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH

  • FTPS (File Transfer Protocol Secure): File transfer with TLS encryption

  • FTP (File Transfer Protocol): Unencrypted file transfer

If you select FTPS, you must choose a certificate stored in AWS Certificate Manager (ACM) which will be used to identify your file transfer protocol-enabled server when clients connect to it over FTPS.

If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be API_GATEWAY.

If Protocol includes FTP, then AddressAllocationIds cannot be associated.

If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to SERVICE_MANAGED.

" + "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH

  • FTPS (File Transfer Protocol Secure): File transfer with TLS encryption

  • FTP (File Transfer Protocol): Unencrypted file transfer

If you select FTPS, you must choose a certificate stored in AWS Certificate Manager (ACM) which will be used to identify your server when clients connect to it over FTPS.

If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be API_GATEWAY.

If Protocol includes FTP, then AddressAllocationIds cannot be associated.

If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to SERVICE_MANAGED.
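For the simplest combination described above, SFTP only with a PUBLIC endpoint and service-managed users, a sketch of the call looks like this; the tag value is a placeholder.

import software.amazon.awssdk.services.transfer.TransferClient;
import software.amazon.awssdk.services.transfer.model.CreateServerRequest;
import software.amazon.awssdk.services.transfer.model.EndpointType;
import software.amazon.awssdk.services.transfer.model.IdentityProviderType;
import software.amazon.awssdk.services.transfer.model.Protocol;
import software.amazon.awssdk.services.transfer.model.Tag;

public class CreateSftpServerSketch {
    public static void main(String[] args) {
        try (TransferClient transfer = TransferClient.create()) {
            String serverId = transfer.createServer(CreateServerRequest.builder()
                    .protocols(Protocol.SFTP)                              // SFTP only, so no ACM certificate needed
                    .endpointType(EndpointType.PUBLIC)
                    .identityProviderType(IdentityProviderType.SERVICE_MANAGED)
                    .tags(Tag.builder().key("project").value("file-drop").build())
                    .build())
                    .serverId();
            System.out.println("Created server " + serverId);
        }
    }
}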

" }, "SecurityPolicyName":{ "shape":"SecurityPolicyName", @@ -418,7 +418,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs that can be used to group and search for file transfer protocol-enabled servers.

" + "documentation":"

Key-value pairs that can be used to group and search for servers.

" } } }, @@ -428,7 +428,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

The service-assigned ID of the file transfer protocol-enabled server that is created.

" + "documentation":"

The service-assigned ID of the server that is created.

" } } }, @@ -442,11 +442,11 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

The landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using the client.

An example is your-Amazon-S3-bucket-name>/home/username .

" + "documentation":"

The landing directory (folder) for a user when they log in to the server using the client.

An example is your-Amazon-S3-bucket-name/home/username.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" + "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", @@ -458,15 +458,15 @@ }, "Role":{ "shape":"Role", - "documentation":"

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

" + "documentation":"

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance. This is the specific server that you added your user to.

" + "documentation":"

A system-assigned unique identifier for a server instance. This is the specific server that you added your user to.

" }, "SshPublicKeyBody":{ "shape":"SshPublicKeyBody", - "documentation":"

The public portion of the Secure Shell (SSH) key used to authenticate the user to the file transfer protocol-enabled server.

" + "documentation":"

The public portion of the Secure Shell (SSH) key used to authenticate the user to the server.

" }, "Tags":{ "shape":"Tags", @@ -474,7 +474,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user and is associated with a file transfer protocol-enabled server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, and at sign.

" + "documentation":"

A unique string that identifies a user and is associated with a server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, or at sign.
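A companion sketch that adds a service-managed user to such a server; the server ID, role ARN, bucket path, and key material are placeholders.

import software.amazon.awssdk.services.transfer.TransferClient;
import software.amazon.awssdk.services.transfer.model.CreateUserRequest;
import software.amazon.awssdk.services.transfer.model.HomeDirectoryType;

public class CreateTransferUserSketch {
    public static void main(String[] args) {
        try (TransferClient transfer = TransferClient.create()) {
            transfer.createUser(CreateUserRequest.builder()
                    .serverId("s-0123456789abcdef0")                        // placeholder server ID
                    .userName("alice")
                    .role("arn:aws:iam::123456789012:role/transfer-access") // role trusted by the Transfer service
                    .homeDirectoryType(HomeDirectoryType.PATH)
                    .homeDirectory("/my-bucket/home/alice")
                    .sshPublicKeyBody("ssh-rsa AAAA...example-public-key")
                    .build());
        }
    }
}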

" } } }, @@ -487,11 +487,11 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

The ID of the file transfer protocol-enabled server that the user is attached to.

" + "documentation":"

The ID of the server that the user is attached to.

" }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user account associated with a file transfer protocol-enabled server.

" + "documentation":"

A unique string that identifies a user account associated with a server.

" } } }, @@ -502,7 +502,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A unique system-assigned identifier for a file transfer protocol-enabled server instance.

" + "documentation":"

A unique system-assigned identifier for a server instance.

" } } }, @@ -537,11 +537,11 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that has the user assigned to it.

" + "documentation":"

A system-assigned unique identifier for a server instance that has the user assigned to it.

" }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user that is being deleted from a file transfer protocol-enabled server.

" + "documentation":"

A unique string that identifies a user that is being deleted from a server.

" } } }, @@ -571,7 +571,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server.

" + "documentation":"

A system-assigned unique identifier for a server.

" } } }, @@ -581,7 +581,7 @@ "members":{ "Server":{ "shape":"DescribedServer", - "documentation":"

An array containing the properties of a file transfer protocol-enabled server with the ServerID you specified.

" + "documentation":"

An array containing the properties of a server with the ServerID you specified.

" } } }, @@ -594,11 +594,11 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that has this user assigned.

" + "documentation":"

A system-assigned unique identifier for a server that has this user assigned.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user assigned to one or more file transfer protocol-enabled servers. User names are part of the sign-in credentials to use the AWS Transfer Family service and perform file transfer tasks.

" + "documentation":"

The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the AWS Transfer Family service and perform file transfer tasks.

" } } }, @@ -611,7 +611,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that has this user assigned.

" + "documentation":"

A system-assigned unique identifier for a server that has this user assigned.

" }, "User":{ "shape":"DescribedUser", @@ -656,7 +656,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

Specifies the unique Amazon Resource Name (ARN) of the file transfer protocol-enabled server.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) of the server.

" }, "Certificate":{ "shape":"Certificate", @@ -664,11 +664,11 @@ }, "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

Specifies the virtual private cloud (VPC) endpoint settings that you configured for your file transfer protocol-enabled server.

" + "documentation":"

Specifies the virtual private cloud (VPC) endpoint settings that you configured for your server.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

Defines the type of endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" + "documentation":"

Defines the type of endpoint that your server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" }, "HostKeyFingerprint":{ "shape":"HostKeyFingerprint", @@ -676,15 +676,15 @@ }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", - "documentation":"

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of a file transfer protocol-enabled server is SERVICE_MANAGED.

" + "documentation":"

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of a server is SERVICE_MANAGED.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

Specifies the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this file transfer protocol-enabled server to store and access user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" + "documentation":"

Specifies the mode of authentication enabled for this service. A value of SERVICE_MANAGED means that you are using this server to store and access user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

" + "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

" }, "Protocols":{ "shape":"Protocols", @@ -696,19 +696,19 @@ }, "ServerId":{ "shape":"ServerId", - "documentation":"

Specifies the unique system-assigned identifier for a file transfer protocol-enabled server that you instantiate.

" + "documentation":"

Specifies the unique system-assigned identifier for a server that you instantiate.

" }, "State":{ "shape":"State", - "documentation":"

Specifies the condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

Specifies the condition of the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Specifies the key-value pairs that you can use to search for and group file transfer protocol-enabled servers that were assigned to the server that was described.

" + "documentation":"

Specifies the key-value pairs that you can use to search for and group servers. These key-value pairs were assigned to the server that was described.

" }, "UserCount":{ "shape":"UserCount", - "documentation":"

Specifies the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" + "documentation":"

Specifies the number of users that are assigned to a server you specified with the ServerId.

" } }, "documentation":"

Describes the properties of a file transfer protocol-enabled server that was specified.

" @@ -739,7 +739,7 @@ }, "Role":{ "shape":"Role", - "documentation":"

Specifies the IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows a file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

" + "documentation":"

Specifies the IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows a server to access your resources when servicing your users' transfer requests.

" }, "SshPublicKeys":{ "shape":"SshPublicKeys", @@ -751,7 +751,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

Specifies the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your file transfer protocol-enabled server.

" + "documentation":"

Specifies the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your server.

" } }, "documentation":"

Describes the properties of a user that was specified.

" @@ -761,19 +761,23 @@ "members":{ "AddressAllocationIds":{ "shape":"AddressAllocationIds", - "documentation":"

A list of address allocation IDs that are required to attach an Elastic IP address to your file transfer protocol-enabled server's endpoint. This is only valid in the UpdateServer API.

This property can only be use when EndpointType is set to VPC.

" + "documentation":"

A list of address allocation IDs that are required to attach an Elastic IP address to your server's endpoint.

This property can only be set when EndpointType is set to VPC and it is only valid in the UpdateServer API.

" }, "SubnetIds":{ "shape":"SubnetIds", - "documentation":"

A list of subnet IDs that are required to host your file transfer protocol-enabled server endpoint in your VPC.

This property can only be used when EndpointType is set to VPC.

" + "documentation":"

A list of subnet IDs that are required to host your server endpoint in your VPC.

This property can only be set when EndpointType is set to VPC.

" }, "VpcEndpointId":{ "shape":"VpcEndpointId", - "documentation":"

The ID of the VPC endpoint.

This property can only be used when EndpointType is set to VPC_ENDPOINT.

" + "documentation":"

The ID of the VPC endpoint.

This property can only be set when EndpointType is set to VPC_ENDPOINT.

" }, "VpcId":{ "shape":"VpcId", - "documentation":"

The VPC ID of the VPC in which a file transfer protocol-enabled server's endpoint will be hosted.

This property can only be used when EndpointType is set to VPC.

" + "documentation":"

The VPC ID of the VPC in which a server's endpoint will be hosted.

This property can only be set when EndpointType is set to VPC.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

A list of security group IDs that are available to attach to your server's endpoint.

This property can only be set when EndpointType is set to VPC.

You can only edit the SecurityGroupIds property in the UpdateServer API and only if you are changing the EndpointType from PUBLIC or VPC_ENDPOINT to VPC.
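
For illustration, a minimal sketch of using this new property from the AWS SDK for Java v2 once the Transfer client is regenerated from this model. The server, VPC, subnet, and security group IDs are placeholders, and the securityGroupIds builder method is assumed to follow the usual codegen naming.

```java
import software.amazon.awssdk.services.transfer.TransferClient;
import software.amazon.awssdk.services.transfer.model.EndpointDetails;
import software.amazon.awssdk.services.transfer.model.EndpointType;
import software.amazon.awssdk.services.transfer.model.UpdateServerRequest;

public class SwitchEndpointToVpc {
    public static void main(String[] args) {
        try (TransferClient transfer = TransferClient.create()) {
            // Placeholder IDs; SecurityGroupIds can only be set while switching the
            // EndpointType from PUBLIC or VPC_ENDPOINT to VPC.
            transfer.updateServer(UpdateServerRequest.builder()
                    .serverId("s-0123456789abcdef0")
                    .endpointType(EndpointType.VPC)
                    .endpointDetails(EndpointDetails.builder()
                            .vpcId("vpc-0123456789abcdef0")
                            .subnetIds("subnet-0123456789abcdef0")
                            .securityGroupIds("sg-0123456789abcdef0")
                            .build())
                    .build());
        }
    }
}
```

After the switch, security group changes go through the EC2 APIs on the endpoint's network interfaces rather than through UpdateServer.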

" } }, "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

" @@ -861,7 +865,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server.

" + "documentation":"

A system-assigned unique identifier for a server.

" }, "SshPublicKeyBody":{ "shape":"SshPublicKeyBody", @@ -869,7 +873,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user account that is assigned to one or more file transfer protocol-enabled servers.

" + "documentation":"

The name of the user account that is assigned to one or more servers.

" } } }, @@ -883,7 +887,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server.

" + "documentation":"

A system-assigned unique identifier for a server.

" }, "SshPublicKeyId":{ "shape":"SshPublicKeyId", @@ -894,7 +898,7 @@ "documentation":"

A user name assigned to the ServerID value that you specified.

" } }, - "documentation":"

Identifies the user, the file transfer protocol-enabled server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

" + "documentation":"

Identifies the user, the server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

" }, "InternalServiceError":{ "type":"structure", @@ -956,11 +960,11 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

Specifies the number of file transfer protocol-enabled servers to return as a response to the ListServers query.

" + "documentation":"

Specifies the number of servers to return as a response to the ListServers query.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional file transfer protocol-enabled servers.

" + "documentation":"

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.
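
A minimal sketch of the NextToken loop described above, using the existing ListServers operation in the AWS SDK for Java v2; the maxResults value is arbitrary and the generated method names are assumed to follow standard codegen conventions.

```java
import software.amazon.awssdk.services.transfer.TransferClient;
import software.amazon.awssdk.services.transfer.model.ListServersRequest;
import software.amazon.awssdk.services.transfer.model.ListServersResponse;
import software.amazon.awssdk.services.transfer.model.ListedServer;

public class ListAllServers {
    public static void main(String[] args) {
        try (TransferClient transfer = TransferClient.create()) {
            String nextToken = null;
            do {
                ListServersResponse page = transfer.listServers(ListServersRequest.builder()
                        .maxResults(10)
                        .nextToken(nextToken)   // null on the first call
                        .build());
                for (ListedServer server : page.servers()) {
                    System.out.println(server.serverId() + " " + server.stateAsString());
                }
                nextToken = page.nextToken();   // null when there are no more pages
            } while (nextToken != null);
        }
    }
}
```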

" } } }, @@ -970,11 +974,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional file transfer protocol-enabled servers.

" + "documentation":"

When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional servers.

" }, "Servers":{ "shape":"ListedServers", - "documentation":"

An array of file transfer protocol-enabled servers that were listed.

" + "documentation":"

An array of servers that were listed.

" } } }, @@ -1027,7 +1031,7 @@ }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that has users assigned to it.

" + "documentation":"

A system-assigned unique identifier for a server that has users assigned to it.

" } } }, @@ -1044,7 +1048,7 @@ }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that the users are assigned to.

" + "documentation":"

A system-assigned unique identifier for a server that the users are assigned to.

" }, "Users":{ "shape":"ListedUsers", @@ -1058,31 +1062,31 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

Specifies the unique Amazon Resource Name (ARN) for a file transfer protocol-enabled server to be listed.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) for a server to be listed.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

Specifies the authentication method used to validate a user for a file transfer protocol-enabled server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" + "documentation":"

Specifies the authentication method used to validate a user for a server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

Specifies the type of VPC endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" + "documentation":"

Specifies the type of VPC endpoint that your server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging.

" + "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

Specifies the unique system assigned identifier for a file transfer protocol-enabled servers that were listed.

" + "documentation":"

Specifies the unique system-assigned identifier for the servers that were listed.

" }, "State":{ "shape":"State", - "documentation":"

Specifies the condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

Specifies the condition of the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "UserCount":{ "shape":"UserCount", - "documentation":"

Specifies the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" + "documentation":"

Specifies the number of users that are assigned to a server you specified with the ServerId.

" } }, "documentation":"

Returns properties of a file transfer protocol-enabled server that was specified.

" @@ -1210,6 +1214,16 @@ "min":20, "pattern":"arn:.*role/.*" }, + "SecurityGroupId":{ + "type":"string", + "max":20, + "min":11, + "pattern":"^sg-[0-9a-f]{8,17}$" + }, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"} + }, "SecurityPolicyName":{ "type":"string", "max":100, @@ -1295,7 +1309,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that you start.

" + "documentation":"

A system-assigned unique identifier for a server that you start.

" } } }, @@ -1318,7 +1332,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that you stopped.

" + "documentation":"

A system-assigned unique identifier for a server that you stopped.

" } } }, @@ -1391,7 +1405,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned identifier for a specific file transfer protocol-enabled server. That server's user authentication method is tested with a user name and password.

" + "documentation":"

A system-assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.

" }, "ServerProtocol":{ "shape":"Protocol", @@ -1471,15 +1485,15 @@ }, "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server to resources only within your VPC. To control incoming internet traffic, you will need to associate one or more Elastic IP addresses with your server's endpoint.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your server. With a VPC endpoint, you can restrict access to your server to resources only within your VPC. To control incoming internet traffic, you will need to associate one or more Elastic IP addresses with your server's endpoint.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" + "documentation":"

The type of endpoint that you want your server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" }, "HostKey":{ "shape":"HostKey", - "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -m PEM -f my-new-server-key.

If you aren't planning to migrate existing users from an existing file transfer protocol-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Change the host key for your SFTP-enabled server in the AWS Transfer Family User Guide.

" + "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -m PEM -f my-new-server-key.

If you aren't planning to migrate existing users from an existing server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Change the host key for your SFTP-enabled server in the AWS Transfer Family User Guide.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", @@ -1499,7 +1513,7 @@ }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a server instance that the user account is assigned to.

" } } }, @@ -1509,7 +1523,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a server that the user account is assigned to.

" } } }, @@ -1522,11 +1536,11 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

Specifies the landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using their file transfer protocol client.

An example is your-Amazon-S3-bucket-name>/home/username.

" + "documentation":"

Specifies the landing directory (folder) for a user when they log in to the server using their file transfer protocol client.

An example is your-Amazon-S3-bucket-name/home/username.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" + "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it to LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.
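
A minimal sketch of setting a LOGICAL home directory with mappings via UpdateUser in the AWS SDK for Java v2; the server ID, user name, and bucket path are placeholders, and HomeDirectoryMapEntry is assumed to be the generated mapping type.

```java
import software.amazon.awssdk.services.transfer.TransferClient;
import software.amazon.awssdk.services.transfer.model.HomeDirectoryMapEntry;
import software.amazon.awssdk.services.transfer.model.HomeDirectoryType;
import software.amazon.awssdk.services.transfer.model.UpdateUserRequest;

public class UseLogicalHomeDirectory {
    public static void main(String[] args) {
        try (TransferClient transfer = TransferClient.create()) {
            transfer.updateUser(UpdateUserRequest.builder()
                    .serverId("s-0123456789abcdef0")              // placeholder server ID
                    .userName("example-user")                     // placeholder user name
                    .homeDirectoryType(HomeDirectoryType.LOGICAL)
                    // Map the user's virtual root ("/") to the bucket prefix they may see.
                    .homeDirectoryMappings(HomeDirectoryMapEntry.builder()
                            .entry("/")
                            .target("/my-bucket/home/example-user") // placeholder bucket path
                            .build())
                    .build());
        }
    }
}
```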

" }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", @@ -1538,15 +1552,15 @@ }, "Role":{ "shape":"Role", - "documentation":"

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

" + "documentation":"

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a server instance that the user account is assigned to.

" }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user and is associated with a file transfer protocol-enabled server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, and at sign.

" + "documentation":"

A unique string that identifies a user and is associated with a server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't start with a hyphen, period, or at sign.

" } } }, @@ -1559,14 +1573,14 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a server instance that the user account is assigned to.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The unique identifier for a user that is assigned to a file transfer protocol-enabled server instance that was specified in the request.

" + "documentation":"

The unique identifier for a user that is assigned to a server instance that was specified in the request.

" } }, - "documentation":"

UpdateUserResponse returns the user name and file transfer protocol-enabled server identifier for the request to update a user's properties.

" + "documentation":"

UpdateUserResponse returns the user name and server identifier for the request to update a user's properties.

" }, "Url":{ "type":"string", diff --git a/services/translate/pom.xml b/services/translate/pom.xml index 641b1bee3828..1d61c89db9ab 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 translate diff --git a/services/translate/src/main/resources/codegen-resources/paginators-1.json b/services/translate/src/main/resources/codegen-resources/paginators-1.json index 67f44714aca2..7447f8e1bf87 100644 --- a/services/translate/src/main/resources/codegen-resources/paginators-1.json +++ b/services/translate/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,10 @@ { "pagination": { + "ListParallelData": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListTerminologies": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/translate/src/main/resources/codegen-resources/service-2.json b/services/translate/src/main/resources/codegen-resources/service-2.json index 0eb1ba3e13eb..c1947e759c45 100644 --- a/services/translate/src/main/resources/codegen-resources/service-2.json +++ b/services/translate/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,40 @@ "uid":"translate-2017-07-01" }, "operations":{ + "CreateParallelData":{ + "name":"CreateParallelData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateParallelDataRequest"}, + "output":{"shape":"CreateParallelDataResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a parallel data resource in Amazon Translate by importing an input file from Amazon S3. Parallel data files contain examples of source phrases and their translations from your translation memory. By adding parallel data, you can influence the style, tone, and word choice in your translation output.
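
For illustration, a hedged sketch of calling CreateParallelData from the AWS SDK for Java v2 once a Translate client is generated from this model; the resource name and S3 folder are placeholders, and the required ClientToken is assumed to be filled in automatically by the SDK because the member is marked as an idempotency token.

```java
import software.amazon.awssdk.services.translate.TranslateClient;
import software.amazon.awssdk.services.translate.model.CreateParallelDataRequest;
import software.amazon.awssdk.services.translate.model.CreateParallelDataResponse;
import software.amazon.awssdk.services.translate.model.ParallelDataConfig;
import software.amazon.awssdk.services.translate.model.ParallelDataFormat;

public class CreateParallelDataExample {
    public static void main(String[] args) {
        try (TranslateClient translate = TranslateClient.create()) {
            CreateParallelDataResponse response = translate.createParallelData(
                    CreateParallelDataRequest.builder()
                            .name("my-parallel-data")                        // placeholder resource name
                            .description("Example phrases and translations")
                            .parallelDataConfig(ParallelDataConfig.builder()
                                    .s3Uri("s3://my-bucket/parallel-data/")  // placeholder S3 folder
                                    .format(ParallelDataFormat.TSV)
                                    .build())
                            .build());
            // Status is CREATING until the import finishes, then ACTIVE.
            System.out.println(response.name() + ": " + response.statusAsString());
        }
    }
}
```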

" + }, + "DeleteParallelData":{ + "name":"DeleteParallelData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteParallelDataRequest"}, + "output":{"shape":"DeleteParallelDataResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a parallel data resource in Amazon Translate.

" + }, "DeleteTerminology":{ "name":"DeleteTerminology", "http":{ @@ -43,6 +77,22 @@ ], "documentation":"

Gets the properties associated with an asynchronous batch translation job including name, ID, status, source and target languages, input/output S3 buckets, and so on.

" }, + "GetParallelData":{ + "name":"GetParallelData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetParallelDataRequest"}, + "output":{"shape":"GetParallelDataResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Provides information about a parallel data resource.

" + }, "GetTerminology":{ "name":"GetTerminology", "http":{ @@ -75,6 +125,21 @@ ], "documentation":"

Creates or updates a custom terminology, depending on whether or not one already exists for the given terminology name. Importing a terminology with the same name as an existing one will merge the terminologies based on the chosen merge strategy. Currently, the only supported merge strategy is OVERWRITE, and so the imported terminology will overwrite an existing terminology of the same name.

If you import a terminology that overwrites an existing one, the new terminology takes up to 10 minutes to fully propagate and be available for use in a translation due to cache policies with the DataPlane service that performs the translations.

" }, + "ListParallelData":{ + "name":"ListParallelData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListParallelDataRequest"}, + "output":{"shape":"ListParallelDataResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Provides a list of your parallel data resources in Amazon Translate.

" + }, "ListTerminologies":{ "name":"ListTerminologies", "http":{ @@ -157,6 +222,26 @@ {"shape":"ServiceUnavailableException"} ], "documentation":"

Translates input text from the source language to the target language. For a list of available languages and language codes, see what-is-languages.

" + }, + "UpdateParallelData":{ + "name":"UpdateParallelData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateParallelDataRequest"}, + "output":{"shape":"UpdateParallelDataResponse"}, + "errors":[ + {"shape":"ConcurrentModificationException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates a previously created parallel data resource by importing a new input file from Amazon S3.

" } }, "shapes":{ @@ -190,11 +275,91 @@ "min":1, "pattern":"^[a-zA-Z0-9-]+$" }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

Another modification is being made. That modification must complete before you can make your change.

", + "exception":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

There was a conflict processing the request. Try your request again.

", + "exception":true + }, "ContentType":{ "type":"string", "max":256, "pattern":"^[-\\w.]+\\/[-\\w.+]+$" }, + "CreateParallelDataRequest":{ + "type":"structure", + "required":[ + "Name", + "ParallelDataConfig", + "ClientToken" + ], + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

A custom name for the parallel data resource in Amazon Translate. You must assign a name that is unique in the account and region.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A custom description for the parallel data resource in Amazon Translate.

" + }, + "ParallelDataConfig":{ + "shape":"ParallelDataConfig", + "documentation":"

Specifies the format and S3 location of the parallel data input file.

" + }, + "EncryptionKey":{"shape":"EncryptionKey"}, + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

A unique identifier for the request. This token is automatically generated when you use Amazon Translate through an AWS SDK.

", + "idempotencyToken":true + } + } + }, + "CreateParallelDataResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The custom name that you assigned to the parallel data resource.

" + }, + "Status":{ + "shape":"ParallelDataStatus", + "documentation":"

The status of the parallel data resource. When the resource is ready for you to use, the status is ACTIVE.

" + } + } + }, + "DeleteParallelDataRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The name of the parallel data resource that is being deleted.

" + } + } + }, + "DeleteParallelDataResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The name of the parallel data resource that is being deleted.

" + }, + "Status":{ + "shape":"ParallelDataStatus", + "documentation":"

The status of the parallel data deletion.

" + } + } + }, "DeleteTerminologyRequest":{ "type":"structure", "required":["Name"], @@ -257,7 +422,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the encryption key being used to encrypt the custom terminology.

" } }, - "documentation":"

The encryption key used to encrypt the custom terminologies used by Amazon Translate.

" + "documentation":"

The encryption key used to encrypt this object.

" }, "EncryptionKeyID":{ "type":"string", @@ -269,6 +434,37 @@ "type":"string", "enum":["KMS"] }, + "GetParallelDataRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The name of the parallel data resource that is being retrieved.

" + } + } + }, + "GetParallelDataResponse":{ + "type":"structure", + "members":{ + "ParallelDataProperties":{ + "shape":"ParallelDataProperties", + "documentation":"

The properties of the parallel data resource that is being retrieved.

" + }, + "DataLocation":{ + "shape":"ParallelDataDataLocation", + "documentation":"

The location of the most recent parallel data input file that was successfully imported into Amazon Translate. The location is returned as a presigned URL that has a 30 minute expiration.

" + }, + "AuxiliaryDataLocation":{ + "shape":"ParallelDataDataLocation", + "documentation":"

The Amazon S3 location of a file that provides any errors or warnings that were produced by your input file. This file was created when Amazon Translate attempted to create a parallel data resource. The location is returned as a presigned URL that has a 30 minute expiration.

" + }, + "LatestUpdateAttemptAuxiliaryDataLocation":{ + "shape":"ParallelDataDataLocation", + "documentation":"

The Amazon S3 location of a file that provides any errors or warnings that were produced by your input file. This file was created when Amazon Translate attempted to update a parallel data resource. The location is returned as a presigned URL that has a 30 minute expiration.

" + } + } + }, "GetTerminologyRequest":{ "type":"structure", "required":[ @@ -455,6 +651,32 @@ "documentation":"

The specified limit has been exceeded. Review your request and retry it with a quantity below the stated limit.

", "exception":true }, + "ListParallelDataRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string that specifies the next page of results to return in a paginated response.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of parallel data resources returned for each request.

" + } + } + }, + "ListParallelDataResponse":{ + "type":"structure", + "members":{ + "ParallelDataPropertiesList":{ + "shape":"ParallelDataPropertiesList", + "documentation":"

The properties of the parallel data resources returned by this request.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
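
A sketch of draining the paginated ListParallelData results from the AWS SDK for Java v2; the listParallelDataPaginator convenience method is assumed to be generated for this operation, and maxResults is arbitrary.

```java
import software.amazon.awssdk.services.translate.TranslateClient;
import software.amazon.awssdk.services.translate.model.ListParallelDataRequest;

public class ListAllParallelData {
    public static void main(String[] args) {
        try (TranslateClient translate = TranslateClient.create()) {
            // The paginator follows the NextToken/MaxResults contract described above,
            // issuing follow-up requests until NextToken comes back null.
            translate.listParallelDataPaginator(ListParallelDataRequest.builder()
                            .maxResults(100)
                            .build())
                    .stream()
                    .flatMap(page -> page.parallelDataPropertiesList().stream())
                    .forEach(pd -> System.out.println(pd.name() + " " + pd.statusAsString()));
        }
    }
}
```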

" + } + } + }, "ListTerminologiesRequest":{ "type":"structure", "members":{ @@ -511,6 +733,7 @@ } } }, + "Long":{"type":"long"}, "MaxResultsInteger":{ "type":"integer", "max":500, @@ -536,6 +759,140 @@ }, "documentation":"

The output configuration properties for a batch translation job.

" }, + "ParallelDataArn":{ + "type":"string", + "max":512, + "min":1 + }, + "ParallelDataConfig":{ + "type":"structure", + "required":[ + "S3Uri", + "Format" + ], + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

The URI of the Amazon S3 folder that contains the parallel data input file. The folder must be in the same Region as the API endpoint you are calling.

" + }, + "Format":{ + "shape":"ParallelDataFormat", + "documentation":"

The format of the parallel data input file.

" + } + }, + "documentation":"

Specifies the format and S3 location of the parallel data input file.

" + }, + "ParallelDataDataLocation":{ + "type":"structure", + "required":[ + "RepositoryType", + "Location" + ], + "members":{ + "RepositoryType":{ + "shape":"String", + "documentation":"

Describes the repository that contains the parallel data input file.

" + }, + "Location":{ + "shape":"String", + "documentation":"

The Amazon S3 location of the parallel data input file. The location is returned as a presigned URL that has a 30 minute expiration.

" + } + }, + "documentation":"

The location of the most recent parallel data input file that was successfully imported into Amazon Translate.

" + }, + "ParallelDataFormat":{ + "type":"string", + "enum":[ + "TSV", + "CSV", + "TMX" + ] + }, + "ParallelDataProperties":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The custom name assigned to the parallel data resource.

" + }, + "Arn":{ + "shape":"ParallelDataArn", + "documentation":"

The Amazon Resource Name (ARN) of the parallel data resource.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The description assigned to the parallel data resource.

" + }, + "Status":{ + "shape":"ParallelDataStatus", + "documentation":"

The status of the parallel data resource. When the parallel data is ready for you to use, the status is ACTIVE.

" + }, + "SourceLanguageCode":{ + "shape":"LanguageCodeString", + "documentation":"

The source language of the translations in the parallel data file.

" + }, + "TargetLanguageCodes":{ + "shape":"LanguageCodeStringList", + "documentation":"

The language codes for the target languages available in the parallel data file. All possible target languages are returned as an array.

" + }, + "ParallelDataConfig":{ + "shape":"ParallelDataConfig", + "documentation":"

Specifies the format and S3 location of the parallel data input file.

" + }, + "Message":{ + "shape":"UnboundedLengthString", + "documentation":"

Additional information from Amazon Translate about the parallel data resource.

" + }, + "ImportedDataSize":{ + "shape":"Long", + "documentation":"

The number of UTF-8 characters that Amazon Translate imported from the parallel data input file. This number includes only the characters in your translation examples. It does not include characters that are used to format your file. For example, if you provided a Translation Memory Exchange (.tmx) file, this number does not include the tags.

" + }, + "ImportedRecordCount":{ + "shape":"Long", + "documentation":"

The number of records successfully imported from the parallel data input file.

" + }, + "FailedRecordCount":{ + "shape":"Long", + "documentation":"

The number of records unsuccessfully imported from the parallel data input file.

" + }, + "SkippedRecordCount":{ + "shape":"Long", + "documentation":"

The number of items in the input file that Amazon Translate skipped when you created or updated the parallel data resource. For example, Amazon Translate skips empty records, empty target texts, and empty lines.

" + }, + "EncryptionKey":{"shape":"EncryptionKey"}, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the parallel data resource was created.

" + }, + "LastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the parallel data resource was last updated.

" + }, + "LatestUpdateAttemptStatus":{ + "shape":"ParallelDataStatus", + "documentation":"

The status of the most recent update attempt for the parallel data resource.

" + }, + "LatestUpdateAttemptAt":{ + "shape":"Timestamp", + "documentation":"

The time that the most recent update was attempted.

" + } + }, + "documentation":"

The properties of a parallel data resource.

" + }, + "ParallelDataPropertiesList":{ + "type":"list", + "member":{"shape":"ParallelDataProperties"} + }, + "ParallelDataStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "ACTIVE", + "DELETING", + "FAILED" + ] + }, "ResourceName":{ "type":"string", "max":256, @@ -607,6 +964,10 @@ "shape":"ResourceNameList", "documentation":"

The name of the terminology to use in the batch translation job. For a list of available terminologies, use the ListTerminologies operation.

" }, + "ParallelDataNames":{ + "shape":"ResourceNameList", + "documentation":"

The names of the parallel data resources to use in the batch translation job. For a list of available parallel data resources, use the ListParallelData operation.
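
A hedged sketch of supplying a parallel data resource to a batch job via this new member in the AWS SDK for Java v2; the bucket locations, role ARN, job name, and resource name are placeholders, and the builder method names are assumed to mirror the model's member names.

```java
import software.amazon.awssdk.services.translate.TranslateClient;
import software.amazon.awssdk.services.translate.model.InputDataConfig;
import software.amazon.awssdk.services.translate.model.OutputDataConfig;
import software.amazon.awssdk.services.translate.model.StartTextTranslationJobRequest;
import software.amazon.awssdk.services.translate.model.StartTextTranslationJobResponse;

public class StartBatchJobWithParallelData {
    public static void main(String[] args) {
        try (TranslateClient translate = TranslateClient.create()) {
            StartTextTranslationJobResponse job = translate.startTextTranslationJob(
                    StartTextTranslationJobRequest.builder()
                            .jobName("docs-batch-1")                                // placeholder
                            .sourceLanguageCode("en")
                            .targetLanguageCodes("es")
                            .inputDataConfig(InputDataConfig.builder()
                                    .s3Uri("s3://my-bucket/input/")                 // placeholder
                                    .contentType("text/plain")
                                    .build())
                            .outputDataConfig(OutputDataConfig.builder()
                                    .s3Uri("s3://my-bucket/output/")                // placeholder
                                    .build())
                            .dataAccessRoleArn("arn:aws:iam::123456789012:role/TranslateJobRole") // placeholder
                            .parallelDataNames("my-parallel-data")  // a resource listed by ListParallelData
                            .build());
            System.out.println("JobId: " + job.jobId());
        }
    }
}
```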

" + }, "ClientToken":{ "shape":"ClientTokenString", "documentation":"

A unique identifier for the request. This token is auto-generated when using the Amazon Translate SDK.

", @@ -843,6 +1204,10 @@ "shape":"ResourceNameList", "documentation":"

A list containing the names of the terminologies applied to a translation job. Only one terminology can be applied per StartTextTranslationJob request at this time.

" }, + "ParallelDataNames":{ + "shape":"ResourceNameList", + "documentation":"

A list containing the names of the parallel data resources applied to the translation job.

" + }, "Message":{ "shape":"UnboundedLengthString", "documentation":"

An explanation of any errors that may have occurred during the translation job.

" @@ -951,6 +1316,54 @@ }, "documentation":"

Amazon Translate does not support translation from the language of the source text into the requested target language. For more information, see how-to-error-msg.

", "exception":true + }, + "UpdateParallelDataRequest":{ + "type":"structure", + "required":[ + "Name", + "ParallelDataConfig", + "ClientToken" + ], + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The name of the parallel data resource being updated.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A custom description for the parallel data resource in Amazon Translate.

" + }, + "ParallelDataConfig":{ + "shape":"ParallelDataConfig", + "documentation":"

Specifies the format and S3 location of the parallel data input file.

" + }, + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

A unique identifier for the request. This token is automatically generated when you use Amazon Translate through an AWS SDK.

", + "idempotencyToken":true + } + } + }, + "UpdateParallelDataResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ResourceName", + "documentation":"

The name of the parallel data resource being updated.

" + }, + "Status":{ + "shape":"ParallelDataStatus", + "documentation":"

The status of the parallel data resource that you are attempting to update. Your update request is accepted only if this status is either ACTIVE or FAILED.

" + }, + "LatestUpdateAttemptStatus":{ + "shape":"ParallelDataStatus", + "documentation":"

The status of the parallel data update attempt. When the updated parallel data resource is ready for you to use, the status is ACTIVE.

" + }, + "LatestUpdateAttemptAt":{ + "shape":"Timestamp", + "documentation":"

The time that the most recent update was attempted.

" + } + } } }, "documentation":"

Provides translation between one source language and another of the same set of languages.

" diff --git a/services/waf/pom.xml b/services/waf/pom.xml index fc9defd889f7..fec7d8371aed 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index 549ff5bd9b4d..5df9118d8030 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index b5d0a2a7484f..16490c22434e 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index 0417598ecce5..8ae68f232279 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index f3789f30d196..25fab935dd85 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 workmail diff --git a/services/workmail/src/main/resources/codegen-resources/service-2.json b/services/workmail/src/main/resources/codegen-resources/service-2.json index 65b6c32c5741..affbfdcbdfe0 100644 --- a/services/workmail/src/main/resources/codegen-resources/service-2.json +++ b/services/workmail/src/main/resources/codegen-resources/service-2.json @@ -111,6 +111,24 @@ "documentation":"

Creates a group that can be used in Amazon WorkMail by calling the RegisterToWorkMail operation.

", "idempotent":true }, + "CreateOrganization":{ + "name":"CreateOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOrganizationRequest"}, + "output":{"shape":"CreateOrganizationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DirectoryInUseException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"LimitExceededException"}, + {"shape":"NameAvailabilityException"} + ], + "documentation":"

Creates a new Amazon WorkMail organization. Optionally, you can choose to associate an existing AWS Directory Service directory with your organization. If an AWS Directory Service directory ID is specified, the organization alias must match the directory alias. If you choose not to associate an existing directory with your organization, then we create a new Amazon WorkMail directory for you. For more information, see Adding an organization in the Amazon WorkMail Administrator Guide.

You can associate multiple email domains with an organization, then set your default email domain from the Amazon WorkMail console. You can also associate a domain that is managed in an Amazon Route 53 public hosted zone. For more information, see Adding a domain and Choosing the default domain in the Amazon WorkMail Administrator Guide.

Optionally, you can use a customer managed master key from AWS Key Management Service (AWS KMS) to encrypt email for your organization. If you don't associate an AWS KMS key, Amazon WorkMail creates a default AWS managed master key for you.
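
For illustration, a minimal sketch of calling CreateOrganization from the AWS SDK for Java v2 once the WorkMail client is regenerated from this model; the alias, domain name, and hosted zone ID are placeholders.

```java
import software.amazon.awssdk.services.workmail.WorkMailClient;
import software.amazon.awssdk.services.workmail.model.CreateOrganizationRequest;
import software.amazon.awssdk.services.workmail.model.CreateOrganizationResponse;
import software.amazon.awssdk.services.workmail.model.Domain;

public class CreateWorkMailOrganization {
    public static void main(String[] args) {
        try (WorkMailClient workMail = WorkMailClient.create()) {
            // No DirectoryId is supplied, so WorkMail creates a directory for the organization.
            CreateOrganizationResponse response = workMail.createOrganization(
                    CreateOrganizationRequest.builder()
                            .alias("example-org")                        // placeholder alias
                            .domains(Domain.builder()
                                    .domainName("mail.example.com")      // placeholder domain
                                    .hostedZoneId("Z0123456789EXAMPLE")  // placeholder Route 53 hosted zone
                                    .build())
                            .build());
            System.out.println("OrganizationId: " + response.organizationId());
        }
    }
}
```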

", + "idempotent":true + }, "CreateResource":{ "name":"CreateResource", "http":{ @@ -223,6 +241,22 @@ "documentation":"

Deletes permissions granted to a member (user or group).

", "idempotent":true }, + "DeleteOrganization":{ + "name":"DeleteOrganization", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOrganizationRequest"}, + "output":{"shape":"DeleteOrganizationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"} + ], + "documentation":"

Deletes an Amazon WorkMail organization and all underlying AWS resources managed by Amazon WorkMail as part of the organization. You can choose whether to delete the associated directory. For more information, see Removing an organization in the Amazon WorkMail Administrator Guide.
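
A short sketch of the corresponding DeleteOrganization call that keeps the associated directory; the organization ID is a placeholder and the generated request type is assumed.

```java
import software.amazon.awssdk.services.workmail.WorkMailClient;
import software.amazon.awssdk.services.workmail.model.DeleteOrganizationRequest;

public class DeleteWorkMailOrganization {
    public static void main(String[] args) {
        try (WorkMailClient workMail = WorkMailClient.create()) {
            // Keep the AWS Directory Service directory; delete only the WorkMail organization.
            workMail.deleteOrganization(DeleteOrganizationRequest.builder()
                    .organizationId("m-0123456789abcdef0123456789abcdef") // placeholder 34-char ID
                    .deleteDirectory(false)
                    .build());
        }
    }
}
```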

", + "idempotent":true + }, "DeleteResource":{ "name":"DeleteResource", "http":{ @@ -1110,6 +1144,46 @@ } } }, + "CreateOrganizationRequest":{ + "type":"structure", + "required":["Alias"], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

The AWS Directory Service directory ID.

" + }, + "Alias":{ + "shape":"OrganizationName", + "documentation":"

The organization alias.

" + }, + "ClientToken":{ + "shape":"IdempotencyClientToken", + "documentation":"

The idempotency token associated with the request.

", + "idempotencyToken":true + }, + "Domains":{ + "shape":"Domains", + "documentation":"

The email domains to associate with the organization.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of a customer managed master key from AWS KMS.

" + }, + "EnableInteroperability":{ + "shape":"Boolean", + "documentation":"

When true, allows organization interoperability between Amazon WorkMail and Microsoft Exchange. Can only be set to true if an AD Connector directory ID is included in the request.

" + } + } + }, + "CreateOrganizationResponse":{ + "type":"structure", + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The organization ID.

" + } + } + }, "CreateResourceRequest":{ "type":"structure", "required":[ @@ -1156,7 +1230,7 @@ }, "Name":{ "shape":"UserName", - "documentation":"

The name for the new user. Simple AD or AD Connector user names have a maximum length of 20. All others have a maximum length of 64.

" + "documentation":"

The name for the new user. WorkMail directory user names have a maximum length of 64. All others have a maximum length of 20.

" }, "DisplayName":{ "shape":"String", @@ -1280,7 +1354,7 @@ }, "EntityId":{ "shape":"WorkMailIdentifier", - "documentation":"

The identifier of the member (user or group)that owns the mailbox.

" + "documentation":"

The identifier of the member (user or group) that owns the mailbox.

" }, "GranteeId":{ "shape":"WorkMailIdentifier", @@ -1293,6 +1367,41 @@ "members":{ } }, + "DeleteOrganizationRequest":{ + "type":"structure", + "required":[ + "OrganizationId", + "DeleteDirectory" + ], + "members":{ + "ClientToken":{ + "shape":"IdempotencyClientToken", + "documentation":"

The idempotency token associated with the request.

", + "idempotencyToken":true + }, + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The organization ID.

" + }, + "DeleteDirectory":{ + "shape":"Boolean", + "documentation":"

If true, deletes the AWS Directory Service directory associated with the organization.

" + } + } + }, + "DeleteOrganizationResponse":{ + "type":"structure", + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The organization ID.

" + }, + "State":{ + "shape":"String", + "documentation":"

The state of the organization.

" + } + } + }, "DeleteResourceRequest":{ "type":"structure", "required":[ @@ -1662,6 +1771,20 @@ "min":0, "pattern":"[\\S\\s]*" }, + "DirectoryId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^d-[0-9a-f]{10}$" + }, + "DirectoryInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The directory is already in use by another WorkMail organization in the same account and Region.

", + "exception":true + }, "DirectoryServiceAuthenticationFailedException":{ "type":"structure", "members":{ @@ -1675,7 +1798,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The directory on which you are trying to perform operations isn't available.

", + "documentation":"

The directory is unavailable. It might be located in another Region or deleted.

", "exception":true }, "DisassociateDelegateFromResourceRequest":{ @@ -1732,11 +1855,37 @@ "members":{ } }, + "Domain":{ + "type":"structure", + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

The fully qualified domain name.

" + }, + "HostedZoneId":{ + "shape":"HostedZoneId", + "documentation":"

The hosted zone ID for a domain hosted in Route 53. Required when configuring a domain hosted in Route 53.

" + } + }, + "documentation":"

The domain to associate with an Amazon WorkMail organization.

When you configure a domain hosted in Amazon Route 53 (Route 53), all recommended DNS records are added to the organization when you create it. For more information, see Adding a domain in the Amazon WorkMail Administrator Guide.

" + }, + "DomainName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"[a-zA-Z0-9.-]+\\.[a-zA-Z-]{2,}" + }, + "Domains":{ + "type":"list", + "member":{"shape":"Domain"}, + "max":5, + "min":0 + }, "EmailAddress":{ "type":"string", "max":254, "min":1, - "pattern":"[a-zA-Z0-9._%+-]{1,64}@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}" + "pattern":"[a-zA-Z0-9._%+-]{1,64}@[a-zA-Z0-9.-]+\\.[a-zA-Z-]{2,}" }, "EmailAddressInUseException":{ "type":"structure", @@ -1955,6 +2104,12 @@ "type":"list", "member":{"shape":"Group"} }, + "HostedZoneId":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[\\S\\s]*" + }, "IdempotencyClientToken":{ "type":"string", "max":128, @@ -2507,17 +2662,20 @@ "NextToken":{ "type":"string", "max":1024, - "min":1 + "min":1, + "pattern":"[\\S\\s]*|[a-zA-Z0-9/+=]{1,1024}" }, "OrganizationId":{ "type":"string", + "max":34, + "min":34, "pattern":"^m-[0-9a-f]{32}$" }, "OrganizationName":{ "type":"string", "max":62, "min":1, - "pattern":"^(?!d-)([\\da-zA-Z]+)([-]*[\\da-zA-Z])*" + "pattern":"^(?!d-)([\\da-zA-Z]+)([-][\\da-zA-Z]+)*" }, "OrganizationNotFoundException":{ "type":"structure", @@ -2532,7 +2690,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The organization must have a valid state (Active or Synchronizing) to perform certain operations on the organization or its members.

", + "documentation":"

The organization must have a valid state to perform certain operations on the organization or its members.

", "exception":true }, "OrganizationSummaries":{ @@ -2550,6 +2708,10 @@ "shape":"OrganizationName", "documentation":"

The alias associated with the organization.

" }, + "DefaultMailDomain":{ + "shape":"DomainName", + "documentation":"

The default email domain associated with the organization.

" + }, "ErrorMessage":{ "shape":"String", "documentation":"

The error message associated with the organization. It is only present if unexpected behavior has occurred with regards to the organization. It provides insight or solutions regarding unexpected behavior.

" @@ -2842,13 +3004,15 @@ }, "ResourceId":{ "type":"string", + "max":34, + "min":34, "pattern":"^r-[0-9a-f]{32}$" }, "ResourceName":{ "type":"string", "max":20, "min":1, - "pattern":"[\\w\\-.]+(@[a-zA-Z0-9.\\-]+\\.[a-zA-Z0-9]{2,})?" + "pattern":"[\\w\\-.]+(@[a-zA-Z0-9.\\-]+\\.[a-zA-Z0-9-]{2,})?" }, "ResourceNotFoundException":{ "type":"structure", @@ -3199,7 +3363,7 @@ "type":"string", "max":64, "min":1, - "pattern":"[\\w\\-.]+(@[a-zA-Z0-9.\\-]+\\.[a-zA-Z0-9]{2,})?" + "pattern":"[\\w\\-.]+(@[a-zA-Z0-9.\\-]+\\.[a-zA-Z0-9-]{2,})?" }, "UserRole":{ "type":"string", diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index db471076938e..d416aa0d709f 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 697b994119c2..97a3f6824ed9 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspaces/src/main/resources/codegen-resources/service-2.json b/services/workspaces/src/main/resources/codegen-resources/service-2.json index 2f03c999addf..c99da7d6e769 100644 --- a/services/workspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/workspaces/src/main/resources/codegen-resources/service-2.json @@ -82,7 +82,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"

Copies the specified image from the specified Region to the current Region.

" + "documentation":"

Copies the specified image from the specified Region to the current Region. For more information about copying images, see Copy a Custom WorkSpaces Image.

Before copying a shared image, be sure to verify that it has been shared from the correct AWS account. To determine if an image has been shared and to see the AWS account ID that owns an image, use the DescribeWorkSpaceImages and DescribeWorkspaceImagePermissions API operations.

" }, "CreateConnectionAlias":{ "name":"CreateConnectionAlias", @@ -486,7 +486,7 @@ {"shape":"InvalidParameterValuesException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL).

The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

" + "documentation":"

Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL).

This operation can be run only by AWS accounts that are enabled for BYOL. If your account isn't enabled for BYOL, you'll receive an AccessDeniedException error.

The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

" }, "MigrateWorkspace":{ "name":"MigrateWorkspace", @@ -716,7 +716,7 @@ }, "input":{"shape":"TerminateWorkspacesRequest"}, "output":{"shape":"TerminateWorkspacesResult"}, - "documentation":"

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact Amazon Web Services before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated.

" + "documentation":"

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact AWS Support before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated. After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up, so this state is rarely returned. To confirm that a WorkSpace is terminated, check for the WorkSpace ID by using DescribeWorkSpaces. If the WorkSpace ID isn't returned, then the WorkSpace has been successfully terminated.
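As a concrete illustration of that confirmation step, the following is a minimal sketch using the AWS SDK for Java v2 WorkSpaces client; the WorkSpace ID and class name are placeholders rather than values taken from this changeset.

```java
import java.util.List;
import software.amazon.awssdk.services.workspaces.WorkSpacesClient;
import software.amazon.awssdk.services.workspaces.model.Workspace;

public class ConfirmTermination {
    public static void main(String[] args) {
        // Placeholder WorkSpace ID; substitute the ID of the WorkSpace you terminated.
        String workspaceId = "ws-example12345";

        try (WorkSpacesClient workSpaces = WorkSpacesClient.create()) {
            // DescribeWorkspaces filtered to the single ID of interest.
            List<Workspace> matches = workSpaces
                    .describeWorkspaces(r -> r.workspaceIds(workspaceId))
                    .workspaces();

            // Per the documentation above, an absent ID means the WorkSpace is gone;
            // a present ID briefly reports the TERMINATED state before cleanup.
            if (matches.isEmpty()) {
                System.out.println(workspaceId + " has been terminated.");
            } else {
                System.out.println(workspaceId + " is in state " + matches.get(0).state());
            }
        }
    }
}
```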

" }, "UpdateConnectionAliasPermission":{ "name":"UpdateConnectionAliasPermission", @@ -769,7 +769,7 @@ {"shape":"InvalidParameterValuesException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Shares or unshares an image with one account by specifying whether that account has permission to copy the image. If the copy image permission is granted, the image is shared with that account. If the copy image permission is revoked, the image is unshared with the account.

  • To delete an image that has been shared, you must unshare the image before you delete it.

  • Sharing Bring Your Own License (BYOL) images across AWS accounts isn't supported at this time in the AWS GovCloud (US-West) Region. To share BYOL images across accounts in the AWS GovCloud (US-West) Region, contact AWS Support.

" + "documentation":"

Shares or unshares an image with one account by specifying whether that account has permission to copy the image. If the copy image permission is granted, the image is shared with that account. If the copy image permission is revoked, the image is unshared with the account. For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

  • To delete an image that has been shared, you must unshare the image before you delete it.

  • Sharing Bring Your Own License (BYOL) images across AWS accounts isn't supported at this time in the AWS GovCloud (US-West) Region. To share BYOL images across accounts in the AWS GovCloud (US-West) Region, contact AWS Support.

" } }, "shapes":{ @@ -1221,7 +1221,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

The tags. Each WorkSpaces resource can have a maximum of 50 tags. If you want to add new tags to a set of existing tags, you must submit all of the existing tags along with the new ones.

" + "documentation":"

The tags. Each WorkSpaces resource can have a maximum of 50 tags.

" } } }, @@ -1923,7 +1923,7 @@ "documentation":"

The identifier of the AWS account that an image has been shared with.

" } }, - "documentation":"

Describes the AWS accounts that have been granted permission to use a shared image.

" + "documentation":"

Describes the AWS accounts that have been granted permission to use a shared image. For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

" }, "ImagePermissions":{ "type":"list", @@ -2877,7 +2877,7 @@ }, "SharedAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account to share or unshare the image with.

" + "documentation":"

The identifier of the AWS account to share or unshare the image with.

Before sharing the image, confirm that you are sharing to the correct AWS account ID.

" } } }, @@ -2924,7 +2924,7 @@ }, "State":{ "shape":"WorkspaceState", - "documentation":"

The operational state of the WorkSpace.

" + "documentation":"

The operational state of the WorkSpace.

After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up, so this state is rarely returned. To confirm that a WorkSpace is terminated, check for the WorkSpace ID by using DescribeWorkSpaces. If the WorkSpace ID isn't returned, then the WorkSpace has been successfully terminated.

" }, "BundleId":{ "shape":"BundleId", @@ -3146,7 +3146,7 @@ }, "State":{ "shape":"WorkspaceDirectoryState", - "documentation":"

The state of the directory's registration with Amazon WorkSpaces.

" + "documentation":"

The state of the directory's registration with Amazon WorkSpaces. After a directory is deregistered, the DEREGISTERED state is returned very briefly before the directory metadata is cleaned up, so this state is rarely returned. To confirm that a directory is deregistered, check for the directory ID by using DescribeWorkspaceDirectories. If the directory ID isn't returned, then the directory has been successfully deregistered.

" }, "WorkspaceCreationProperties":{ "shape":"DefaultWorkspaceCreationProperties", diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 4d6eb5d32db5..e4a7b312c8be 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/services/xray/src/main/resources/codegen-resources/paginators-1.json b/services/xray/src/main/resources/codegen-resources/paginators-1.json index 0b4d3d86af97..5ffeb594d62f 100644 --- a/services/xray/src/main/resources/codegen-resources/paginators-1.json +++ b/services/xray/src/main/resources/codegen-resources/paginators-1.json @@ -2,9 +2,6 @@ "pagination": { "BatchGetTraces": { "input_token": "NextToken", - "non_aggregate_keys": [ - "UnprocessedTraceIds" - ], "output_token": "NextToken", "result_key": "Traces" }, @@ -13,6 +10,16 @@ "output_token": "NextToken", "result_key": "Groups" }, + "GetInsightEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "GetInsightSummaries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "GetSamplingRules": { "input_token": "NextToken", "output_token": "NextToken", @@ -25,19 +32,11 @@ }, "GetServiceGraph": { "input_token": "NextToken", - "non_aggregate_keys": [ - "StartTime", - "EndTime", - "ContainsOldGroupVersions" - ], "output_token": "NextToken", "result_key": "Services" }, "GetTimeSeriesServiceStatistics": { "input_token": "NextToken", - "non_aggregate_keys": [ - "ContainsOldGroupVersions" - ], "output_token": "NextToken", "result_key": "TimeSeriesServiceStatistics" }, @@ -48,10 +47,6 @@ }, "GetTraceSummaries": { "input_token": "NextToken", - "non_aggregate_keys": [ - "TracesProcessedCount", - "ApproximateTime" - ], "output_token": "NextToken", "result_key": "TraceSummaries" } diff --git a/services/xray/src/main/resources/codegen-resources/service-2.json b/services/xray/src/main/resources/codegen-resources/service-2.json index 9738653c1d1b..f5d91cf6479a 100644 --- a/services/xray/src/main/resources/codegen-resources/service-2.json +++ b/services/xray/src/main/resources/codegen-resources/service-2.json @@ -123,6 +123,62 @@ ], "documentation":"

Retrieves all active group details.

" }, + "GetInsight":{ + "name":"GetInsight", + "http":{ + "method":"POST", + "requestUri":"/Insight" + }, + "input":{"shape":"GetInsightRequest"}, + "output":{"shape":"GetInsightResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Retrieves the summary information of an insight. This includes impact to clients and root cause services, the top anomalous services, the category, the state of the insight, and the start and end time of the insight.

" + }, + "GetInsightEvents":{ + "name":"GetInsightEvents", + "http":{ + "method":"POST", + "requestUri":"/InsightEvents" + }, + "input":{"shape":"GetInsightEventsRequest"}, + "output":{"shape":"GetInsightEventsResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

X-Ray reevaluates insights periodically until they're resolved, and records each intermediate state as an event. You can review an insight's events in the Impact Timeline on the Inspect page in the X-Ray console.

" + }, + "GetInsightImpactGraph":{ + "name":"GetInsightImpactGraph", + "http":{ + "method":"POST", + "requestUri":"/InsightImpactGraph" + }, + "input":{"shape":"GetInsightImpactGraphRequest"}, + "output":{"shape":"GetInsightImpactGraphResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Retrieves a service graph structure filtered by the specified insight. The service graph is limited to only structural information. For a complete service graph, use this API with the GetServiceGraph API.

" + }, + "GetInsightSummaries":{ + "name":"GetInsightSummaries", + "http":{ + "method":"POST", + "requestUri":"/InsightSummaries" + }, + "input":{"shape":"GetInsightSummariesRequest"}, + "output":{"shape":"GetInsightSummariesResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"

Retrieves the summaries of all insights in the specified group matching the provided filter values.
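For context on how this new operation surfaces in the generated client, here is a minimal sketch against the AWS SDK for Java v2; it assumes a build that already contains this model, and the group name, time window, and class name are illustrative placeholders.

```java
import java.time.Duration;
import java.time.Instant;
import software.amazon.awssdk.services.xray.XRayClient;
import software.amazon.awssdk.services.xray.model.GetInsightSummariesResponse;
import software.amazon.awssdk.services.xray.model.InsightState;

public class ListRecentInsights {
    public static void main(String[] args) {
        Instant now = Instant.now();

        try (XRayClient xray = XRayClient.create()) {
            // Summaries of active insights for a group over the last 24 hours (placeholder group name).
            GetInsightSummariesResponse summaries = xray.getInsightSummaries(r -> r
                    .groupName("my-service-group")
                    .states(InsightState.ACTIVE)
                    .startTime(now.minus(Duration.ofHours(24)))
                    .endTime(now)
                    .maxResults(50));

            summaries.insightSummaries().forEach(s ->
                    System.out.printf("%s [%s]: %s%n", s.insightId(), s.stateAsString(), s.summary()));
        }
    }
}
```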

" + }, "GetSamplingRules":{ "name":"GetSamplingRules", "http":{ @@ -276,7 +332,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottledException"} ], - "documentation":"

Uploads segment documents to AWS X-Ray. The X-Ray SDK generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.

Segments must include the following fields. For the full segment document schema, see AWS X-Ray Segment Documents in the AWS X-Ray Developer Guide.

Required Segment Document Fields

  • name - The name of the service that handled the request.

  • id - A 64-bit identifier for the segment, unique among segments in the same trace, in 16 hexadecimal digits.

  • trace_id - A unique identifier that connects all segments and subsegments originating from a single client request.

  • start_time - Time the segment or subsegment was created, in floating point seconds in epoch time, accurate to milliseconds. For example, 1480615200.010 or 1.480615200010E9.

  • end_time - Time the segment or subsegment was closed. For example, 1480615200.090 or 1.480615200090E9. Specify either an end_time or in_progress.

  • in_progress - Set to true instead of specifying an end_time to record that a segment has been started, but is not complete. Send an in progress segment when your application receives a request that will take a long time to serve, to trace the fact that the request was received. When the response is sent, send the complete segment to overwrite the in-progress segment.

A trace_id consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. This includes:

Trace ID Format

  • The version number, i.e. 1.

  • The time of the original request, in Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal.

  • A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.

" + "documentation":"

Uploads segment documents to AWS X-Ray. The X-Ray SDK generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.

Segments must include the following fields. For the full segment document schema, see AWS X-Ray Segment Documents in the AWS X-Ray Developer Guide.

Required segment document fields

  • name - The name of the service that handled the request.

  • id - A 64-bit identifier for the segment, unique among segments in the same trace, in 16 hexadecimal digits.

  • trace_id - A unique identifier that connects all segments and subsegments originating from a single client request.

  • start_time - Time the segment or subsegment was created, in floating point seconds in epoch time, accurate to milliseconds. For example, 1480615200.010 or 1.480615200010E9.

  • end_time - Time the segment or subsegment was closed. For example, 1480615200.090 or 1.480615200090E9. Specify either an end_time or in_progress.

  • in_progress - Set to true instead of specifying an end_time to record that a segment has been started, but is not complete. Send an in-progress segment when your application receives a request that will take a long time to serve, to trace that the request was received. When the response is sent, send the complete segment to overwrite the in-progress segment.

A trace_id consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. This includes:

Trace ID Format

  • The version number, for instance, 1.

  • The time of the original request, in Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal.

  • A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.
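To make the required fields and the trace_id format above concrete, here is a minimal sketch that assembles a completed segment document by hand and uploads it with the AWS SDK for Java v2 X-Ray client. The service name and class name are placeholders, and in practice the X-Ray SDK or daemon normally generates and batches these documents for you.

```java
import java.math.BigInteger;
import java.security.SecureRandom;
import java.util.Locale;
import software.amazon.awssdk.services.xray.XRayClient;

public class PutSegmentExample {
    private static final SecureRandom RANDOM = new SecureRandom();

    public static void main(String[] args) {
        double start = System.currentTimeMillis() / 1000.0;
        double end = start + 0.050;

        // trace_id: version 1, request time as 8 hex digits of epoch seconds, and a 96-bit random identifier.
        String traceId = String.format(Locale.ROOT, "1-%08x-%024x",
                (long) start, new BigInteger(96, RANDOM));
        // id: a 64-bit segment identifier in 16 hexadecimal digits.
        String segmentId = String.format(Locale.ROOT, "%016x", RANDOM.nextLong());

        // A completed segment: name, id, trace_id, start_time, and end_time are all present.
        String document = String.format(Locale.ROOT,
                "{\"name\":\"example-service\",\"id\":\"%s\",\"trace_id\":\"%s\","
                        + "\"start_time\":%.3f,\"end_time\":%.3f}",
                segmentId, traceId, start, end);

        try (XRayClient xray = XRayClient.create()) {
            xray.putTraceSegments(r -> r.traceSegmentDocuments(document));
        }
    }
}
```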

" }, "TagResource":{ "name":"TagResource", @@ -387,13 +443,24 @@ "documentation":"

Value for a String annotation.

" } }, - "documentation":"

Value of a segment annotation. Has one of three value types: Number, Boolean or String.

" + "documentation":"

Value of a segment annotation. Has one of three value types: Number, Boolean, or String.

" }, "Annotations":{ "type":"map", "key":{"shape":"AnnotationKey"}, "value":{"shape":"ValuesWithServiceIds"} }, + "AnomalousService":{ + "type":"structure", + "members":{ + "ServiceId":{"shape":"ServiceId"} + }, + "documentation":"

The service within the service graph that has anomalously high fault rates.

" + }, + "AnomalousServiceList":{ + "type":"list", + "member":{"shape":"AnomalousService"} + }, "AttributeKey":{ "type":"string", "max":32, @@ -415,10 +482,10 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

The name of a corresponding availability zone.

" + "documentation":"

The name of a corresponding Availability Zone.

" } }, - "documentation":"

A list of availability zones corresponding to the segments in a trace.

" + "documentation":"

A list of Availability Zones corresponding to the segments in a trace.

" }, "BackendConnectionErrors":{ "type":"structure", @@ -505,7 +572,7 @@ }, "InsightsConfiguration":{ "shape":"InsightsConfiguration", - "documentation":"

The structure containing configurations related to insights. The InsightsEnabled boolean can be set to true to enable insights for the new group or false to disable insights for the new group.

" + "documentation":"

The structure containing configurations related to insights.

  • The InsightsEnabled boolean can be set to true to enable insights for the new group or false to disable insights for the new group.

  • The NotificationsEnabled boolean can be set to true to enable insights notifications for the new group. Notifications may only be enabled on a group with InsightsEnabled set to true.
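As a hedged usage sketch of these two flags in the generated AWS SDK for Java v2 client (assuming a build that includes the NotificationsEnabled member added here; the group name and filter expression are placeholders):

```java
import software.amazon.awssdk.services.xray.XRayClient;
import software.amazon.awssdk.services.xray.model.CreateGroupResponse;

public class CreateGroupWithInsights {
    public static void main(String[] args) {
        try (XRayClient xray = XRayClient.create()) {
            // Enable insights and insight notifications for the new group.
            CreateGroupResponse response = xray.createGroup(r -> r
                    .groupName("my-service-group")
                    .filterExpression("service(\"my-service\")")
                    .insightsConfiguration(c -> c
                            .insightsEnabled(true)
                            .notificationsEnabled(true)));

            System.out.println("Created group: " + response.group().groupARN());
        }
    }
}
```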

" }, "Tags":{ "shape":"TagList", @@ -518,7 +585,7 @@ "members":{ "Group":{ "shape":"Group", - "documentation":"

The group that was created. Contains the name of the group that was created, the ARN of the group that was generated based on the group name, the filter expression, and the insight configuration that was assigned to the group.

" + "documentation":"

The group that was created. Contains the name of the group that was created, the Amazon Resource Name (ARN) of the group that was generated based on the group name, the filter expression, and the insight configuration that was assigned to the group.

" } } }, @@ -785,6 +852,7 @@ }, "documentation":"

Information about requests that failed with a 4xx Client Error status code.

" }, + "EventSummaryText":{"type":"string"}, "FaultRootCause":{ "type":"structure", "members":{ @@ -879,6 +947,20 @@ "max":1, "min":0 }, + "ForecastStatistics":{ + "type":"structure", + "members":{ + "FaultCountHigh":{ + "shape":"NullableLong", + "documentation":"

The upper limit of fault counts for a service.

" + }, + "FaultCountLow":{ + "shape":"NullableLong", + "documentation":"

The lower limit of fault counts for a service.

" + } + }, + "documentation":"

The predicted high and low fault count. This is used to determine if a service has become anomalous and if an insight should be created.

" + }, "GetEncryptionConfigRequest":{ "type":"structure", "members":{ @@ -911,7 +993,7 @@ "members":{ "Group":{ "shape":"Group", - "documentation":"

The group that was requested. Contains the name of the group, the ARN of the group, and the filter expression that assigned to the group.

" + "documentation":"

The group that was requested. Contains the name of the group, the ARN of the group, the filter expression, and the insight configuration assigned to the group.

" } } }, @@ -942,6 +1024,175 @@ } } }, + "GetInsightEventsMaxResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "GetInsightEventsRequest":{ + "type":"structure", + "required":["InsightId"], + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

The insight's unique identifier. Use the GetInsightSummaries action to retrieve an InsightId.

" + }, + "MaxResults":{ + "shape":"GetInsightEventsMaxResults", + "documentation":"

Used to retrieve at most the specified number of events.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Specify the pagination token returned by a previous request to retrieve the next page of events.

" + } + } + }, + "GetInsightEventsResult":{ + "type":"structure", + "members":{ + "InsightEvents":{ + "shape":"InsightEventList", + "documentation":"

A detailed description of the event. This includes the time of the event, client and root cause impact statistics, and the top anomalous service at the time of the event.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Use this token to retrieve the next page of insight events.

" + } + } + }, + "GetInsightImpactGraphRequest":{ + "type":"structure", + "required":[ + "InsightId", + "StartTime", + "EndTime" + ], + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

The insight's unique identifier. Use the GetInsightSummaries action to retrieve an InsightId.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The estimated start time of the insight, in Unix time seconds. The StartTime is inclusive of the value provided and can't be more than 30 days old.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The estimated end time of the insight, in Unix time seconds. The EndTime is exclusive of the value provided. The time range between the start time and end time can't be more than six hours.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Specify the pagination token returned by a previous request to retrieve the next page of results.

" + } + } + }, + "GetInsightImpactGraphResult":{ + "type":"structure", + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

The insight's unique identifier.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The provided start time.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The provided end time.

" + }, + "ServiceGraphStartTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, at which the service graph started.

" + }, + "ServiceGraphEndTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, at which the service graph ended.

" + }, + "Services":{ + "shape":"InsightImpactGraphServiceList", + "documentation":"

The AWS instrumented services related to the insight.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Pagination token.

" + } + } + }, + "GetInsightRequest":{ + "type":"structure", + "required":["InsightId"], + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

The insight's unique identifier. Use the GetInsightSummaries action to retrieve an InsightId.

" + } + } + }, + "GetInsightResult":{ + "type":"structure", + "members":{ + "Insight":{ + "shape":"Insight", + "documentation":"

The summary information of an insight.

" + } + } + }, + "GetInsightSummariesMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "GetInsightSummariesRequest":{ + "type":"structure", + "required":[ + "StartTime", + "EndTime" + ], + "members":{ + "States":{ + "shape":"InsightStateList", + "documentation":"

The list of insight states.

" + }, + "GroupARN":{ + "shape":"GroupARN", + "documentation":"

The Amazon Resource Name (ARN) of the group. Required if the GroupName isn't provided.

" + }, + "GroupName":{ + "shape":"GroupName", + "documentation":"

The name of the group. Required if the GroupARN isn't provided.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The beginning of the time frame in which the insights started. The start time can't be more than 30 days old.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time frame in which the insights ended. The end time can't be more than 30 days old.

" + }, + "MaxResults":{ + "shape":"GetInsightSummariesMaxResults", + "documentation":"

The maximum number of results to display.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Pagination token.

" + } + } + }, + "GetInsightSummariesResult":{ + "type":"structure", + "members":{ + "InsightSummaries":{ + "shape":"InsightSummaryList", + "documentation":"

The summary of each insight within the group matching the provided filters. The summary contains the InsightID, start and end time, the root cause service, the root cause and client impact statistics, the top anomalous services, and the status of the insight.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Pagination token.

" + } + } + }, "GetSamplingRulesRequest":{ "type":"structure", "members":{ @@ -1030,11 +1281,11 @@ }, "GroupName":{ "shape":"GroupName", - "documentation":"

The name of a group to generate a graph based on.

" + "documentation":"

The name of a group for which you want to generate a graph.

" }, "GroupARN":{ "shape":"GroupARN", - "documentation":"

The ARN of a group to generate a graph based on.

" + "documentation":"

The Amazon Resource Name (ARN) of a group for which you want to generate a graph.

" }, "NextToken":{ "shape":"String", @@ -1088,7 +1339,7 @@ }, "GroupARN":{ "shape":"GroupARN", - "documentation":"

The ARN of the group for which to pull statistics from.

" + "documentation":"

The Amazon Resource Name (ARN) of the group from which to pull statistics.

" }, "EntitySelectorExpression":{ "shape":"EntitySelectorExpression", @@ -1098,6 +1349,10 @@ "shape":"NullableInteger", "documentation":"

Aggregation period in seconds.

" }, + "ForecastStatistics":{ + "shape":"NullableBoolean", + "documentation":"

The forecasted high and low fault count values. Forecast-enabled requests require that the EntitySelectorExpression ID be provided.

" + }, "NextToken":{ "shape":"String", "documentation":"

Pagination token.

" @@ -1113,7 +1368,7 @@ }, "ContainsOldGroupVersions":{ "shape":"Boolean", - "documentation":"

A flag indicating whether or not a group's filter expression has been consistent, or if a returned aggregation may show statistics from an older version of the group's filter expression.

" + "documentation":"

A flag indicating whether or not a group's filter expression has been consistent, or if a returned aggregation might show statistics from an older version of the group's filter expression.

" }, "NextToken":{ "shape":"String", @@ -1173,7 +1428,7 @@ }, "SamplingStrategy":{ "shape":"SamplingStrategy", - "documentation":"

A paramater to indicate whether to enable sampling on trace summaries. Input parameters are Name and Value.

" + "documentation":"

A parameter to indicate whether to enable sampling on trace summaries. Input parameters are Name and Value.

" }, "FilterExpression":{ "shape":"FilterExpression", @@ -1202,7 +1457,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

If the requested time frame contained more than one page of results, you can use this token to retrieve the next page. The first page contains the most most recent results, closest to the end of the time frame.

" + "documentation":"

If the requested time frame contained more than one page of results, you can use this token to retrieve the next page. The first page contains the most recent results, closest to the end of the time frame.

" } } }, @@ -1215,7 +1470,7 @@ }, "GroupARN":{ "shape":"String", - "documentation":"

The ARN of the group generated based on the GroupName.

" + "documentation":"

The Amazon Resource Name (ARN) of the group generated based on the GroupName.

" }, "FilterExpression":{ "shape":"String", @@ -1223,7 +1478,7 @@ }, "InsightsConfiguration":{ "shape":"InsightsConfiguration", - "documentation":"

The structure containing configurations related to insights. The InsightsEnabled boolean can be set to true to enable insights for the group or false to disable insights for the group.

" + "documentation":"

The structure containing configurations related to insights.

  • The InsightsEnabled boolean can be set to true to enable insights for the group or false to disable insights for the group.

  • The NotificationsEnabled boolean can be set to true to enable insights notifications through Amazon EventBridge for the group.

" } }, "documentation":"

Details and metadata for a group.

" @@ -1255,7 +1510,7 @@ }, "InsightsConfiguration":{ "shape":"InsightsConfiguration", - "documentation":"

The structure containing configurations related to insights. The InsightsEnabled boolean can be set to true to enable insights for the groups or false to disable insights for the groups.

" + "documentation":"

The structure containing configurations related to insights.

  • The InsightsEnabled boolean can be set to true to enable insights for the group or false to disable insights for the group.

  • The NotificationsEnabled boolean can be set to true to enable insights notifications. Notifications can only be enabled on a group with InsightsEnabled set to true.

" } }, "documentation":"

Details for a group without metadata.

" @@ -1320,12 +1575,230 @@ }, "documentation":"

Information about an HTTP request.

" }, + "Insight":{ + "type":"structure", + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

The insight's unique identifier.

" + }, + "GroupARN":{ + "shape":"GroupARN", + "documentation":"

The Amazon Resource Name (ARN) of the group that the insight belongs to.

" + }, + "GroupName":{ + "shape":"GroupName", + "documentation":"

The name of the group that the insight belongs to.

" + }, + "RootCauseServiceId":{"shape":"ServiceId"}, + "Categories":{ + "shape":"InsightCategoryList", + "documentation":"

The categories that label and describe the type of insight.

" + }, + "State":{ + "shape":"InsightState", + "documentation":"

The current state of the insight.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, at which the insight began.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, at which the insight ended.

" + }, + "Summary":{ + "shape":"InsightSummaryText", + "documentation":"

A brief description of the insight.

" + }, + "ClientRequestImpactStatistics":{ + "shape":"RequestImpactStatistics", + "documentation":"

The impact statistics of the client side service. This includes the number of requests to the client service and whether the requests were faults or okay.

" + }, + "RootCauseServiceRequestImpactStatistics":{ + "shape":"RequestImpactStatistics", + "documentation":"

The impact statistics of the root cause service. This includes the number of requests to the client service and whether the requests were faults or okay.

" + }, + "TopAnomalousServices":{ + "shape":"AnomalousServiceList", + "documentation":"

The service within the insight that is most impacted by the incident.

" + } + }, + "documentation":"

When fault rates go outside of the expected range, X-Ray creates an insight. Insights tracks emergent issues within your applications.

" + }, + "InsightCategory":{ + "type":"string", + "enum":["FAULT"] + }, + "InsightCategoryList":{ + "type":"list", + "member":{"shape":"InsightCategory"} + }, + "InsightEvent":{ + "type":"structure", + "members":{ + "Summary":{ + "shape":"EventSummaryText", + "documentation":"

A brief description of the event.

" + }, + "EventTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, at which the event was recorded.

" + }, + "ClientRequestImpactStatistics":{ + "shape":"RequestImpactStatistics", + "documentation":"

The impact statistics of the client side service. This includes the number of requests to the client service and whether the requests were faults or okay.

" + }, + "RootCauseServiceRequestImpactStatistics":{ + "shape":"RequestImpactStatistics", + "documentation":"

The impact statistics of the root cause service. This includes the number of requests to the client service and whether the requests were faults or okay.

" + }, + "TopAnomalousServices":{ + "shape":"AnomalousServiceList", + "documentation":"

The service during the event that is most impacted by the incident.

" + } + }, + "documentation":"

X-Ray reevaluates insights periodically until they are resolved, and records each intermediate state in an event. You can review incident events in the Impact Timeline on the Inspect page in the X-Ray console.

" + }, + "InsightEventList":{ + "type":"list", + "member":{"shape":"InsightEvent"} + }, + "InsightId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}" + }, + "InsightImpactGraphEdge":{ + "type":"structure", + "members":{ + "ReferenceId":{ + "shape":"NullableInteger", + "documentation":"

Identifier of the edge. Unique within a service map.

" + } + }, + "documentation":"

The connection between two services in an insight impact graph.

" + }, + "InsightImpactGraphEdgeList":{ + "type":"list", + "member":{"shape":"InsightImpactGraphEdge"} + }, + "InsightImpactGraphService":{ + "type":"structure", + "members":{ + "ReferenceId":{ + "shape":"NullableInteger", + "documentation":"

Identifier for the service. Unique within the service map.

" + }, + "Type":{ + "shape":"String", + "documentation":"

The type of service.

  • AWS Resource - The type of an AWS resource. For example, AWS::EC2::Instance for an application running on Amazon EC2 or AWS::DynamoDB::Table for an Amazon DynamoDB table that the application used.

  • AWS Service - The type of an AWS service. For example, AWS::DynamoDB for downstream calls to Amazon DynamoDB that didn't target a specific table.


  • remote - A downstream service of indeterminate type.

" + }, + "Name":{ + "shape":"String", + "documentation":"

The canonical name of the service.

" + }, + "Names":{ + "shape":"ServiceNames", + "documentation":"

A list of names for the service, including the canonical name.

" + }, + "AccountId":{ + "shape":"String", + "documentation":"

Identifier of the AWS account in which the service runs.

" + }, + "Edges":{ + "shape":"InsightImpactGraphEdgeList", + "documentation":"

Connections to downstream services.

" + } + }, + "documentation":"

Information about an application that processed requests, users that made requests, or downstream services, resources, and applications that an application used.

" + }, + "InsightImpactGraphServiceList":{ + "type":"list", + "member":{"shape":"InsightImpactGraphService"} + }, + "InsightState":{ + "type":"string", + "enum":[ + "ACTIVE", + "CLOSED" + ] + }, + "InsightStateList":{ + "type":"list", + "member":{"shape":"InsightState"}, + "max":1, + "min":0 + }, + "InsightSummary":{ + "type":"structure", + "members":{ + "InsightId":{ + "shape":"InsightId", + "documentation":"

The insight's unique identifier.

" + }, + "GroupARN":{ + "shape":"GroupARN", + "documentation":"

The Amazon Resource Name (ARN) of the group that the insight belongs to.

" + }, + "GroupName":{ + "shape":"GroupName", + "documentation":"

The name of the group that the insight belongs to.

" + }, + "RootCauseServiceId":{"shape":"ServiceId"}, + "Categories":{ + "shape":"InsightCategoryList", + "documentation":"

The categories that label and describe the type of insight.

" + }, + "State":{ + "shape":"InsightState", + "documentation":"

The current state of the insight.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, at which the insight began.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, at which the insight ended.

" + }, + "Summary":{ + "shape":"InsightSummaryText", + "documentation":"

A brief description of the insight.

" + }, + "ClientRequestImpactStatistics":{ + "shape":"RequestImpactStatistics", + "documentation":"

The impact statistics of the client side service. This includes the number of requests to the client service and whether the requests were faults or okay.

" + }, + "RootCauseServiceRequestImpactStatistics":{ + "shape":"RequestImpactStatistics", + "documentation":"

The impact statistics of the root cause service. This includes the number of requests to the client service and whether the requests were faults or okay.

" + }, + "TopAnomalousServices":{ + "shape":"AnomalousServiceList", + "documentation":"

The service within the insight that is most impacted by the incident.

" + }, + "LastUpdateTime":{ + "shape":"Timestamp", + "documentation":"

The time, in Unix seconds, that the insight was last updated.

" + } + }, + "documentation":"

Information that describes an insight.

" + }, + "InsightSummaryList":{ + "type":"list", + "member":{"shape":"InsightSummary"} + }, + "InsightSummaryText":{"type":"string"}, "InsightsConfiguration":{ "type":"structure", "members":{ "InsightsEnabled":{ "shape":"NullableBoolean", "documentation":"

Set the InsightsEnabled value to true to enable insights or false to disable insights.

" + }, + "NotificationsEnabled":{ + "shape":"NullableBoolean", + "documentation":"

Set the NotificationsEnabled value to true to enable insights notifications. Notifications can only be enabled on a group with InsightsEnabled set to true.

" } }, "documentation":"

The structure containing configurations related to insights.

" @@ -1458,6 +1931,24 @@ "type":"integer", "min":0 }, + "RequestImpactStatistics":{ + "type":"structure", + "members":{ + "FaultCount":{ + "shape":"NullableLong", + "documentation":"

The number of requests that have resulted in a fault.

" + }, + "OkCount":{ + "shape":"NullableLong", + "documentation":"

The number of successful requests.

" + }, + "TotalCount":{ + "shape":"NullableLong", + "documentation":"

The total number of requests to the service.

" + } + }, + "documentation":"

Statistics that describe how the incident has impacted a service.

" + }, "ReservoirSize":{ "type":"integer", "min":0 @@ -1482,7 +1973,7 @@ "Message":{"shape":"ErrorMessage"}, "ResourceName":{"shape":"AmazonResourceName"} }, - "documentation":"

The resource was not found. Verify that the name or ARN of the resource is correct.

", + "documentation":"

The resource was not found. Verify that the name or Amazon Resource Name (ARN) of the resource is correct.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -1509,7 +2000,7 @@ }, "Coverage":{ "shape":"NullableDouble", - "documentation":"

The types and messages of the exceptions.

" + "documentation":"

The type and messages of the exceptions.

" }, "Remote":{ "shape":"NullableBoolean", @@ -1765,7 +2256,7 @@ "documentation":"

The number of requests recorded.

" } }, - "documentation":"

Aggregated request sampling data for a sampling rule across all services for a 10 second window.

" + "documentation":"

Aggregated request sampling data for a sampling rule across all services for a 10-second window.

" }, "SamplingStatisticSummaryList":{ "type":"list", @@ -1847,7 +2338,7 @@ }, "ReservoirQuota":{ "shape":"NullableInteger", - "documentation":"

The number of requests per second that X-Ray allocated this service.

" + "documentation":"

The number of requests per second that X-Ray allocated for this service.

" }, "ReservoirQuotaTTL":{ "shape":"Timestamp", @@ -1912,7 +2403,7 @@ }, "Type":{ "shape":"String", - "documentation":"

The type of service.

  • AWS Resource - The type of an AWS resource. For example, AWS::EC2::Instance for a application running on Amazon EC2 or AWS::DynamoDB::Table for an Amazon DynamoDB table that the application used.

  • AWS Service - The type of an AWS service. For example, AWS::DynamoDB for downstream calls to Amazon DynamoDB that didn't target a specific table.

  • client - Represents the clients that sent requests to a root service.

  • remote - A downstream service of indeterminate type.

" + "documentation":"

The type of service.

  • AWS Resource - The type of an AWS resource. For example, AWS::EC2::Instance for an application running on Amazon EC2 or AWS::DynamoDB::Table for an Amazon DynamoDB table that the application used.

  • AWS Service - The type of an AWS service. For example, AWS::DynamoDB for downstream calls to Amazon DynamoDB that didn't target a specific table.

  • client - Represents the clients that sent requests to a root service.

  • remote - A downstream service of indeterminate type.

" }, "State":{ "shape":"String", @@ -1943,7 +2434,7 @@ "documentation":"

A histogram that maps the spread of service response times.

" } }, - "documentation":"

Information about an application that processed requests, users that made requests, or downstream services, resources and applications that an application used.

" + "documentation":"

Information about an application that processed requests, users that made requests, or downstream services, resources, and applications that an application used.

" }, "ServiceId":{ "type":"structure", @@ -2136,6 +2627,10 @@ }, "EdgeSummaryStatistics":{"shape":"EdgeStatistics"}, "ServiceSummaryStatistics":{"shape":"ServiceStatistics"}, + "ServiceForecastStatistics":{ + "shape":"ForecastStatistics", + "documentation":"

The forecasted high and low fault count values.

" + }, "ResponseTimeHistogram":{ "shape":"Histogram", "documentation":"

The response time histogram for the selected entities.

" @@ -2148,6 +2643,11 @@ "member":{"shape":"TimeSeriesServiceStatistics"} }, "Timestamp":{"type":"timestamp"}, + "Token":{ + "type":"string", + "max":2000, + "min":1 + }, "TooManyTagsException":{ "type":"structure", "members":{ @@ -2169,6 +2669,10 @@ "shape":"NullableDouble", "documentation":"

The length of time in seconds between the start time of the root segment and the end time of the last segment that completed.

" }, + "LimitExceeded":{ + "shape":"NullableBoolean", + "documentation":"

LimitExceeded is set to true when the trace has exceeded one of the defined quotas. For more information about quotas, see AWS X-Ray endpoints and quotas.

" + }, "Segments":{ "shape":"SegmentList", "documentation":"

Segment documents for the segments and subsegments that comprise the trace.

" @@ -2263,7 +2767,7 @@ }, "AvailabilityZones":{ "shape":"TraceAvailabilityZones", - "documentation":"

A list of availability zones for any zone corresponding to the trace segments.

" + "documentation":"

A list of Availability Zones for any zone corresponding to the trace segments.

" }, "EntryPoint":{ "shape":"ServiceId", @@ -2271,7 +2775,7 @@ }, "FaultRootCauses":{ "shape":"FaultRootCauses", - "documentation":"

A collection of FaultRootCause structures corresponding to the the trace segments.

" + "documentation":"

A collection of FaultRootCause structures corresponding to the trace segments.

" }, "ErrorRootCauses":{ "shape":"ErrorRootCauses", @@ -2405,7 +2909,7 @@ }, "InsightsConfiguration":{ "shape":"InsightsConfiguration", - "documentation":"

The structure containing configurations related to insights. The InsightsEnabled boolean can be set to true to enable insights for the group or false to disable insights for the group.

" + "documentation":"

The structure containing configurations related to insights.

  • The InsightsEnabled boolean can be set to true to enable insights for the group or false to disable insights for the group.

  • The NotificationsEnabled boolean can be set to true to enable insights notifications for the group. Notifications can only be enabled on a group with InsightsEnabled set to true.

" } } }, diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 914fe31f3c22..908774c2199d 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml @@ -185,6 +185,11 @@ ${awsjavasdk.version} test + + io.reactivex.rxjava2 + rxjava + test + diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/customization.config b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/customization.config deleted file mode 100644 index 53b007a003d5..000000000000 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/customization.config +++ /dev/null @@ -1,8 +0,0 @@ -{ - "blacklistedSimpleMethods" : [ - "allTypes", - "nestedContainers", - "operationWithNoInputOrOutput" - ], - "useAutoConstructList": true -} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/service-2.json deleted file mode 100644 index 6bda6e0c936f..000000000000 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/autoconstructedlists/service-2.json +++ /dev/null @@ -1,268 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2016-03-11", - "endpointPrefix":"autoconstructlists", - "jsonVersion":"1.1", - "protocol":"json", - "serviceAbbreviation":"AmazonCodeGenerationJsonRpcCustomized", - "serviceFullName":"Amazon Code Generation Json Rpc Customized", - "serviceId":"AmazonCodeGenerationJsonRpcCustomized", - "signatureVersion":"v4", - "targetPrefix":"AmazonCodeGenerationJsonRpcCustomized", - "timestampFormat":"unixTimestamp" - }, - "operations":{ - "AllTypes":{ - "name":"AllTypes", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AllTypesStructure"}, - "output":{"shape":"AllTypesStructure"}, - "errors":[ - {"shape":"EmptyModeledException"} - ] - }, - "NestedContainers":{ - "name":"NestedContainers", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"NestedContainersStructure"}, - "output":{"shape":"NestedContainersStructure"} - }, - "OperationWithNoInputOrOutput":{ - "name":"OperationWithNoInputOrOutput", - "http":{ - "method":"POST", - "requestUri":"/" - } - }, - "StreamingInputOperation":{ - "name":"StreamingInputOperation", - "http":{ - "method":"POST", - "requestUri":"/2016-03-11/streamingInputOperation" - }, - "input":{"shape":"StructureWithStreamingMember"} - }, - "StreamingOutputOperation":{ - "name":"StreamingOutputOperation", - "http":{ - "method":"POST", - "requestUri":"/2016-03-11/streamingOutputOperation" - }, - "output":{"shape":"StructureWithStreamingMember"} - } - }, - "shapes":{ - "AllTypesStructure":{ - "type":"structure", - "members":{ - "StringMember":{"shape":"String"}, - "IntegerMember":{"shape":"Integer"}, - "BooleanMember":{"shape":"Boolean"}, - "FloatMember":{"shape":"Float"}, - "DoubleMember":{"shape":"Double"}, - "LongMember":{"shape":"Long"}, - "SimpleList":{"shape":"ListOfStrings"}, - "ListOfEnums":{"shape":"ListOfEnums"}, - "ListOfMaps":{"shape":"ListOfMapStringToString"}, - "ListOfStructs":{"shape":"ListOfSimpleStructs"}, - "MapOfStringToIntegerList":{"shape":"MapOfStringToIntegerList"}, - 
"MapOfStringToString":{"shape":"MapOfStringToString"}, - "MapOfStringToSimpleStruct":{"shape":"MapOfStringToSimpleStruct"}, - "MapOfEnumToEnum":{"shape":"MapOfEnumToEnum"}, - "MapOfEnumToString":{"shape":"MapOfEnumToString"}, - "MapOfStringToEnum":{"shape":"MapOfStringToEnum"}, - "MapOfEnumToSimpleStruct":{"shape":"MapOfEnumToSimpleStruct"}, - "TimestampMember":{"shape":"Timestamp"}, - "StructWithNestedTimestampMember":{"shape":"StructWithTimestamp"}, - "BlobArg":{"shape":"BlobType"}, - "StructWithNestedBlob":{"shape":"StructWithNestedBlobType"}, - "BlobMap":{"shape":"BlobMapType"}, - "ListOfBlobs":{"shape":"ListOfBlobsType"}, - "RecursiveStruct":{"shape":"RecursiveStructType"}, - "PolymorphicTypeWithSubTypes":{"shape":"BaseType"}, - "PolymorphicTypeWithoutSubTypes":{"shape":"SubTypeOne"}, - "EnumType":{"shape":"EnumType"} - } - }, - "BaseType":{ - "type":"structure", - "members":{ - "BaseMember":{"shape":"String"} - } - }, - "BlobMapType":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"BlobType"} - }, - "BlobType":{"type":"blob"}, - "Boolean":{"type":"boolean"}, - "Double":{"type":"double"}, - "EmptyModeledException":{ - "type":"structure", - "members":{ - }, - "exception":true - }, - "Float":{"type":"float"}, - "IdempotentOperationStructure":{ - "type":"structure", - "members":{ - "IdempotencyToken":{ - "shape":"String", - "idempotencyToken":true - } - } - }, - "Integer":{"type":"integer"}, - "ListOfBlobsType":{ - "type":"list", - "member":{"shape":"BlobType"} - }, - "ListOfIntegers":{ - "type":"list", - "member":{"shape":"Integer"} - }, - "ListOfListOfListOfStrings":{ - "type":"list", - "member":{"shape":"ListOfListOfStrings"} - }, - "ListOfListOfStrings":{ - "type":"list", - "member":{"shape":"ListOfStrings"} - }, - "ListOfMapStringToString":{ - "type":"list", - "member":{"shape":"MapOfStringToString"} - }, - "ListOfSimpleStructs":{ - "type":"list", - "member":{"shape":"SimpleStruct"} - }, - "ListOfStrings":{ - "type":"list", - "member":{"shape":"String"} - }, - "ListOfEnums":{ - "type":"list", - "member":{"shape":"EnumType"} - }, - "Long":{"type":"long"}, - "MapOfStringToIntegerList":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"ListOfIntegers"} - }, - "MapOfStringToListOfListOfStrings":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"ListOfListOfStrings"} - }, - "MapOfStringToSimpleStruct":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"SimpleStruct"} - }, - "MapOfStringToString":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"String"} - }, - "MapOfEnumToEnum":{ - "type":"map", - "key":{"shape":"EnumType"}, - "value":{"shape":"EnumType"} - }, - "MapOfEnumToString":{ - "type":"map", - "key":{"shape":"EnumType"}, - "value":{"shape":"String"} - }, - "MapOfStringToEnum":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"EnumType"} - }, - "MapOfEnumToSimpleStruct":{ - "type":"map", - "key":{"shape":"EnumType"}, - "value":{"shape":"SimpleStruct"} - }, - "NestedContainersStructure":{ - "type":"structure", - "members":{ - "ListOfListOfStrings":{"shape":"ListOfListOfStrings"}, - "ListOfListOfListOfStrings":{"shape":"ListOfListOfListOfStrings"}, - "MapOfStringToListOfListOfStrings":{"shape":"MapOfStringToListOfListOfStrings"} - } - }, - "RecursiveListType":{ - "type":"list", - "member":{"shape":"RecursiveStructType"} - }, - "RecursiveMapType":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"RecursiveStructType"} - }, - "RecursiveStructType":{ - "type":"structure", - 
"members":{ - "NoRecurse":{"shape":"String"}, - "RecursiveStruct":{"shape":"RecursiveStructType"}, - "RecursiveList":{"shape":"RecursiveListType"}, - "RecursiveMap":{"shape":"RecursiveMapType"} - } - }, - "SimpleStruct":{ - "type":"structure", - "members":{ - "StringMember":{"shape":"String"} - } - }, - "StreamType":{ - "type":"blob", - "streaming":true - }, - "String":{"type":"string"}, - "StructWithNestedBlobType":{ - "type":"structure", - "members":{ - "NestedBlob":{"shape":"BlobType"} - } - }, - "StructWithTimestamp":{ - "type":"structure", - "members":{ - "NestedTimestamp":{"shape":"Timestamp"} - } - }, - "StructureWithStreamingMember":{ - "type":"structure", - "members":{ - "StreamingMember":{"shape":"StreamType"} - }, - "payload":"StreamingMember" - }, - "SubTypeOne":{ - "type":"structure", - "members":{ - "SubTypeOneMember":{"shape":"String"} - } - }, - "EnumType": { - "type":"string", - "enum": [ - "EnumValue1", "EnumValue2" - ] - }, - "Timestamp":{"type":"timestamp"} - } -} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json index ee9c551a69bf..db2252d4653d 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json @@ -151,6 +151,26 @@ }, "input":{"shape":"QueryParamWithoutValueInput"} }, + "StreamingInputOperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"StructureWithStreamingMember"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, + "OperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"AllTypesStructure"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, "StreamingInputOperation":{ "name":"StreamingInputOperation", "http":{ diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json index fef93c63e449..44a7a4395de4 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/xml/service-2.json @@ -151,6 +151,26 @@ }, "input":{"shape":"QueryParamWithoutValueInput"} }, + "StreamingInputOperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"StructureWithStreamingMember"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, + "OperationWithRequiredChecksum":{ + "name":"OperationWithRequiredChecksum", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/allTypes" + }, + "input":{"shape":"AllTypesStructure"}, + "output":{"shape":"AllTypesStructure"}, + "httpChecksumRequired": true + }, "StreamingInputOperation":{ "name":"StreamingInputOperation", "http":{ diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumRequiredTest.java 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumRequiredTest.java new file mode 100644 index 000000000000..f481d4ebcd3f --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/HttpChecksumRequiredTest.java @@ -0,0 +1,162 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; + +import io.reactivex.Flowable; +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.awscore.client.builder.AwsAsyncClientBuilder; +import software.amazon.awssdk.awscore.client.builder.AwsClientBuilder; +import software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestxml.ProtocolRestXmlAsyncClient; +import software.amazon.awssdk.services.protocolrestxml.ProtocolRestXmlClient; + +/** + * Verify that the "httpChecksumRequired" C2J trait results in a valid MD5 checksum of the payload being included in the HTTP + * request. 
+ */ +public class HttpChecksumRequiredTest { + private SdkHttpClient httpClient; + private SdkAsyncHttpClient httpAsyncClient; + + private ProtocolRestJsonClient jsonClient; + private ProtocolRestJsonAsyncClient jsonAsyncClient; + private ProtocolRestXmlClient xmlClient; + private ProtocolRestXmlAsyncClient xmlAsyncClient; + + @Before + public void setup() throws IOException { + httpClient = Mockito.mock(SdkHttpClient.class); + httpAsyncClient = Mockito.mock(SdkAsyncHttpClient.class); + + jsonClient = initializeSync(ProtocolRestJsonClient.builder()).build(); + jsonAsyncClient = initializeAsync(ProtocolRestJsonAsyncClient.builder()).build(); + xmlClient = initializeSync(ProtocolRestXmlClient.builder()).build(); + xmlAsyncClient = initializeAsync(ProtocolRestXmlAsyncClient.builder()).build(); + + SdkHttpFullResponse successfulHttpResponse = SdkHttpResponse.builder() + .statusCode(200) + .putHeader("Content-Length", "0") + .build(); + + ExecutableHttpRequest request = Mockito.mock(ExecutableHttpRequest.class); + Mockito.when(request.call()).thenReturn(HttpExecuteResponse.builder() + .response(successfulHttpResponse) + .build()); + Mockito.when(httpClient.prepareRequest(any())).thenReturn(request); + + Mockito.when(httpAsyncClient.execute(any())).thenAnswer(invocation -> { + AsyncExecuteRequest asyncExecuteRequest = invocation.getArgumentAt(0, AsyncExecuteRequest.class); + asyncExecuteRequest.responseHandler().onHeaders(successfulHttpResponse); + asyncExecuteRequest.responseHandler().onStream(Flowable.empty()); + return CompletableFuture.completedFuture(null); + }); + } + + private & AwsClientBuilder> T initializeSync(T syncClientBuilder) { + return initialize(syncClientBuilder.httpClient(httpClient)); + } + + private & AwsClientBuilder> T initializeAsync(T asyncClientBuilder) { + return initialize(asyncClientBuilder.httpClient(httpAsyncClient)); + } + + private > T initialize(T clientBuilder) { + return clientBuilder.credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_WEST_2); + } + + @Test + public void syncJsonSupportsChecksumRequiredTrait() { + jsonClient.operationWithRequiredChecksum(r -> r.stringMember("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("g8VCvPTPCMoU01rBlBVt9w=="); + } + + @Test + public void syncStreamingInputJsonSupportsChecksumRequiredTrait() { + jsonClient.streamingInputOperationWithRequiredChecksum(r -> {}, RequestBody.fromString("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("rL0Y20zC+Fzt72VPzMSk2A=="); + } + + @Test + public void syncStreamingInputXmlSupportsChecksumRequiredTrait() { + xmlClient.streamingInputOperationWithRequiredChecksum(r -> {}, RequestBody.fromString("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("rL0Y20zC+Fzt72VPzMSk2A=="); + } + + @Test + public void syncXmlSupportsChecksumRequiredTrait() { + xmlClient.operationWithRequiredChecksum(r -> r.stringMember("foo")); + assertThat(getSyncRequest().firstMatchingHeader("Content-MD5")).hasValue("vqm481l+Lv0zEvdu+duE6Q=="); + } + + @Test + public void asyncJsonSupportsChecksumRequiredTrait() { + jsonAsyncClient.operationWithRequiredChecksum(r -> r.stringMember("foo")).join(); + assertThat(getAsyncRequest().firstMatchingHeader("Content-MD5")).hasValue("g8VCvPTPCMoU01rBlBVt9w=="); + } + + @Test + public void asyncXmlSupportsChecksumRequiredTrait() { + xmlAsyncClient.operationWithRequiredChecksum(r -> r.stringMember("foo")).join(); + 
assertThat(getAsyncRequest().firstMatchingHeader("Content-MD5")).hasValue("vqm481l+Lv0zEvdu+duE6Q=="); + } + + @Test(expected = CompletionException.class) + public void asyncStreamingInputJsonFailsWithChecksumRequiredTrait() { + jsonAsyncClient.streamingInputOperationWithRequiredChecksum(r -> {}, AsyncRequestBody.fromString("foo")).join(); + } + + @Test(expected = CompletionException.class) + public void asyncStreamingInputXmlFailsWithChecksumRequiredTrait() { + xmlAsyncClient.streamingInputOperationWithRequiredChecksum(r -> {}, AsyncRequestBody.fromString("foo")).join(); + } + + private SdkHttpRequest getSyncRequest() { + ArgumentCaptor captor = ArgumentCaptor.forClass(HttpExecuteRequest.class); + Mockito.verify(httpClient).prepareRequest(captor.capture()); + return captor.getValue().httpRequest(); + } + + private SdkHttpRequest getAsyncRequest() { + ArgumentCaptor captor = ArgumentCaptor.forClass(AsyncExecuteRequest.class); + Mockito.verify(httpAsyncClient).execute(captor.capture()); + return captor.getValue().request(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/GetValueForFieldTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/GetValueForFieldTest.java similarity index 94% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/GetValueForFieldTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/GetValueForFieldTest.java index 17a4f033d290..33275739d2e2 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/GetValueForFieldTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/GetValueForFieldTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ListCopierTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ListCopierTest.java similarity index 94% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ListCopierTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ListCopierTest.java index 0e66060e0824..feaf6793d43f 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ListCopierTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ListCopierTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import static org.assertj.core.api.Assertions.assertThat; diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/MapCopierTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/MapCopierTest.java similarity index 93% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/MapCopierTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/MapCopierTest.java index 21b49b13fe87..98102618dad9 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/MapCopierTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/MapCopierTest.java @@ -13,8 +13,9 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; +import org.assertj.core.api.Assertions; import org.junit.Test; import software.amazon.awssdk.core.util.DefaultSdkAutoConstructMap; import software.amazon.awssdk.core.util.SdkAutoConstructMap; @@ -69,7 +70,7 @@ public void unknownEnumKeyNotAddedToCopiedMap() { Map mapOfEnumToEnum = new HashMap<>(); mapOfEnumToEnum.put("foo", "bar"); Map copy = MapOfEnumToEnumCopier.copyStringToEnum(mapOfEnumToEnum); - assertThat(copy).isEmpty(); + Assertions.assertThat(copy).isEmpty(); } @Test @@ -77,6 +78,6 @@ public void knownEnumKeyAddedToCopiedMap() { Map mapOfEnumToEnum = new HashMap<>(); mapOfEnumToEnum.put(EnumType.ENUM_VALUE1.toString(), "bar"); Map copy = MapOfEnumToEnumCopier.copyStringToEnum(mapOfEnumToEnum); - assertThat(copy).hasSize(1); + Assertions.assertThat(copy).hasSize(1); } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderListMemberTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderListMemberTest.java similarity index 96% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderListMemberTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderListMemberTest.java index 333f60f71ff5..64281a760651 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderListMemberTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderListMemberTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import static org.assertj.core.api.Assertions.assertThat; diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderMapMemberTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderMapMemberTest.java similarity index 96% rename from test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderMapMemberTest.java rename to test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderMapMemberTest.java index 232e1ec3b9b5..7a91e323e39c 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/codegenerationjsonrpccustomized/model/ModelBuilderMapMemberTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/protocolrestjson/model/ModelBuilderMapMemberTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.codegenerationjsonrpccustomized.model; +package software.amazon.awssdk.services.protocolrestjson.model; import org.junit.Test; import software.amazon.awssdk.core.util.SdkAutoConstructMap; diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 2d835a931eff..a158aa2b7181 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index c9983cdd8eee..acfea6da424b 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index 655c08ae186b..9c05a771082e 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 6b4f1a3f5fb0..37528563361b 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/src/main/resources/codegen-resources/query/customization.config b/test/protocol-tests/src/main/resources/codegen-resources/query/customization.config index 8fde3b55e355..aa386944c5d6 100644 --- a/test/protocol-tests/src/main/resources/codegen-resources/query/customization.config +++ b/test/protocol-tests/src/main/resources/codegen-resources/query/customization.config @@ -10,7 +10,5 @@ "queryParamWithoutValue", "idempotentOperation", "queryTypes" - ], - // The tests expect non auto construct lists - "useAutoConstructList": false + ] } diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index 9bd39cc92701..85064fd1df31 100755 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 
../../pom.xml diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index 314f81d61a2e..fe868926e60f 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index 6098ec84d25d..845b48905463 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/utils/StabilityTestRunner.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/utils/StabilityTestRunner.java index 156fd3f90e38..f3131f43a554 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/utils/StabilityTestRunner.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/utils/StabilityTestRunner.java @@ -74,7 +74,7 @@ public class StabilityTestRunner { private static final int TESTS_TIMEOUT_IN_MINUTES = 60; // The peak thread count might be different depending on the machine the tests are currently running on. // because of the internal thread pool used in AsynchronousFileChannel - private static final int ALLOWED_PEAK_THREAD_COUNT = 60; + private static final int ALLOWED_PEAK_THREAD_COUNT = 90; private ThreadMXBean threadMXBean; private IntFunction> futureFactory; diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 261e996cc151..c7120589bc6d 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 586f053ae606..e83dd9d6453e 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index 915229fa5bca..858e962387f2 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.15.8-SNAPSHOT + 2.15.40-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/Lazy.java b/utils/src/main/java/software/amazon/awssdk/utils/Lazy.java index 6923ab8a0bd9..2277938de961 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/Lazy.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/Lazy.java @@ -20,9 +20,11 @@ /** * A class that lazily constructs a value the first time {@link #getValue()} is invoked. + * + * This should be {@link #close()}d if the initializer returns value that needs to be {@link AutoCloseable#close()}d. */ @SdkPublicApi -public class Lazy { +public class Lazy implements SdkAutoCloseable { private final Supplier initializer; private volatile T value; @@ -53,4 +55,17 @@ public String toString() { .add("value", value == null ? "Uninitialized" : value) .build(); } + + @Override + public void close() { + try { + // Make sure the value has been initialized before we attempt to close it + getValue(); + } catch (RuntimeException e) { + // Failed to initialize the value. 
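// Illustrative usage sketch (not part of this change) for the Lazy#close() support added in the
// hunk above. It assumes the existing Lazy(Supplier<T>) constructor; ExpensiveResource is a
// made-up type used only for illustration. Note that, per the close() implementation above,
// close() first initializes the value if it has not been built yet, so that it can be closed.
import software.amazon.awssdk.utils.Lazy;

class LazyCloseSketch {
    static final class ExpensiveResource implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("resource closed");
        }
    }

    public static void main(String[] args) {
        // SdkAutoCloseable extends AutoCloseable, so the Lazy can sit in try-with-resources.
        try (Lazy<ExpensiveResource> lazy = new Lazy<>(ExpensiveResource::new)) {
            ExpensiveResource resource = lazy.getValue(); // built on first access
            // ... use the resource ...
        } // closing the Lazy also closes the ExpensiveResource it built
    }
}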
+ } + + IoUtils.closeIfCloseable(initializer, null); + IoUtils.closeIfCloseable(value, null); + } } diff --git a/utils/src/main/java/software/amazon/awssdk/utils/UserHomeDirectoryUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/UserHomeDirectoryUtils.java new file mode 100644 index 000000000000..1677b5221c4a --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/UserHomeDirectoryUtils.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils; + +import software.amazon.awssdk.annotations.SdkProtectedApi; + +/** + * Load the home directory that should be used for the stored file. This will check the same environment variables as the CLI + * to identify the location of home, before falling back to java-specific resolution. + */ +@SdkProtectedApi +public final class UserHomeDirectoryUtils { + + private UserHomeDirectoryUtils() { + + } + + public static String userHomeDirectory() { + // To match the logic of the CLI we have to consult environment variables directly. + // CHECKSTYLE:OFF + String home = System.getenv("HOME"); + + if (home != null) { + return home; + } + + boolean isWindows = JavaSystemSetting.OS_NAME.getStringValue() + .map(s -> StringUtils.lowerCase(s).startsWith("windows")) + .orElse(false); + + if (isWindows) { + String userProfile = System.getenv("USERPROFILE"); + + if (userProfile != null) { + return userProfile; + } + + String homeDrive = System.getenv("HOMEDRIVE"); + String homePath = System.getenv("HOMEPATH"); + + if (homeDrive != null && homePath != null) { + return homeDrive + homePath; + } + } + + return JavaSystemSetting.USER_HOME.getStringValueOrThrow(); + // CHECKSTYLE:ON + } +} diff --git a/utils/src/main/java/software/amazon/awssdk/utils/async/FilteringSubscriber.java b/utils/src/main/java/software/amazon/awssdk/utils/async/FilteringSubscriber.java index d054ab632e51..c93f16870919 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/async/FilteringSubscriber.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/async/FilteringSubscriber.java @@ -34,8 +34,8 @@ public FilteringSubscriber(Subscriber sourceSubscriber, Predicate @Override public void onSubscribe(Subscription subscription) { - super.onSubscribe(subscription); this.subscription = subscription; + super.onSubscribe(subscription); } @Override diff --git a/utils/src/main/java/software/amazon/awssdk/utils/async/LimitingSubscriber.java b/utils/src/main/java/software/amazon/awssdk/utils/async/LimitingSubscriber.java index f83e1f7a9fb2..ee0460d2a97b 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/async/LimitingSubscriber.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/async/LimitingSubscriber.java @@ -19,6 +19,7 @@ import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.utils.internal.async.EmptySubscription; @SdkProtectedApi public class 
LimitingSubscriber extends DelegatingSubscriber { @@ -35,19 +36,25 @@ public LimitingSubscriber(Subscriber subscriber, int limit) { @Override public void onSubscribe(Subscription subscription) { - super.onSubscribe(subscription); this.subscription = subscription; + if (limit == 0) { + subscription.cancel(); + super.onSubscribe(new EmptySubscription(super.subscriber)); + } else { + super.onSubscribe(subscription); + } } @Override public void onNext(T t) { + int deliveredItems = delivered.incrementAndGet(); // We may get more events even after cancelling so we ignore them. - if (delivered.get() < limit) { + if (deliveredItems <= limit) { subscriber.onNext(t); - } - // If we've met the limit then we can cancel the subscription - if (delivered.incrementAndGet() >= limit) { - subscription.cancel(); + if (deliveredItems == limit) { + subscription.cancel(); + subscriber.onComplete(); + } } } } diff --git a/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java index b1d2fa7773c9..263850cfe3f8 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java @@ -15,6 +15,9 @@ package software.amazon.awssdk.utils.http; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; import java.io.UnsupportedEncodingException; @@ -30,6 +33,7 @@ import java.util.Optional; import java.util.Set; import java.util.function.UnaryOperator; +import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -52,6 +56,9 @@ public final class SdkHttpUtils { private static final String[] ENCODED_CHARACTERS_WITHOUT_SLASHES = new String[] {"+", "*", "%7E"}; private static final String[] ENCODED_CHARACTERS_WITHOUT_SLASHES_REPLACEMENTS = new String[] {"%20", "%2A", "~"}; + private static final String QUERY_PARAM_DELIMITER_REGEX = "\\s*&\\s*"; + private static final Pattern QUERY_PARAM_DELIMITER_PATTERN = Pattern.compile(QUERY_PARAM_DELIMITER_REGEX); + // List of headers that may appear only once in a request; i.e. is not a list of values. // Taken from https://github.com/apache/httpcomponents-client/blob/81c1bc4dc3ca5a3134c5c60e8beff08be2fd8792/httpclient5-cache/src/test/java/org/apache/hc/client5/http/impl/cache/HttpTestUtils.java#L69-L85 with modifications: // removed: accept-ranges, if-match, if-none-match, vary since it looks like they're defined as lists @@ -62,6 +69,7 @@ public final class SdkHttpUtils { "proxy-authorization", "range", "referer", "retry-after", "server", "user-agent") .collect(Collectors.toSet()); + private SdkHttpUtils() { } @@ -322,4 +330,15 @@ public static Optional firstMatchingHeaderFromCollection(Map> uriParams(URI uri) { + return QUERY_PARAM_DELIMITER_PATTERN + .splitAsStream(uri.getRawQuery().trim()) + .map(s -> s.contains("=") ? 
s.split("=", 2) : new String[] {s, null}) + .collect(groupingBy(a -> urlDecode(a[0]), mapping(a -> urlDecode(a[1]), toList()))); + } + } diff --git a/utils/src/main/java/software/amazon/awssdk/utils/internal/MappingSubscriber.java b/utils/src/main/java/software/amazon/awssdk/utils/internal/MappingSubscriber.java new file mode 100644 index 000000000000..cf44a3255d5a --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/internal/MappingSubscriber.java @@ -0,0 +1,92 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.internal; + +import java.util.function.Function; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Maps a subscriber of one type to another type. If an exception is thrown by the mapping function itself, the error + * will be propagated to the downstream subscriber as if it had come from the publisher and then the subscription will + * be implicitly cancelled and no further events from the publisher will be passed along. + */ +@SdkInternalApi +public class MappingSubscriber implements Subscriber { + + private final Subscriber delegateSubscriber; + private final Function mapFunction; + private boolean isCancelled = false; + private Subscription subscription = null; + + private MappingSubscriber(Subscriber delegateSubscriber, + Function mapFunction) { + this.delegateSubscriber = delegateSubscriber; + this.mapFunction = mapFunction; + } + + public static MappingSubscriber create(Subscriber subscriber, + Function mapFunction) { + return new MappingSubscriber<>(subscriber, mapFunction); + } + + @Override + public void onSubscribe(Subscription subscription) { + this.subscription = subscription; + delegateSubscriber.onSubscribe(subscription); + } + + @Override + public void onError(Throwable throwable) { + if (!isCancelled) { + delegateSubscriber.onError(throwable); + } + } + + @Override + public void onComplete() { + if (!isCancelled) { + delegateSubscriber.onComplete(); + } + } + + @Override + public void onNext(T t) { + if (!isCancelled) { + try { + delegateSubscriber.onNext(mapFunction.apply(t)); + } catch (RuntimeException e) { + // If the map function throws an exception, the subscription should be cancelled as the publisher will + // otherwise not be aware it has happened and should have the opportunity to clean up resources. 
+ cancelSubscriptions(); + delegateSubscriber.onError(e); + } + } + } + + private void cancelSubscriptions() { + this.isCancelled = true; + + if (this.subscription != null) { + try { + this.subscription.cancel(); + } catch (RuntimeException ignored) { + // ignore exceptions + } + } + } +} diff --git a/utils/src/main/java/software/amazon/awssdk/utils/internal/async/EmptySubscription.java b/utils/src/main/java/software/amazon/awssdk/utils/internal/async/EmptySubscription.java new file mode 100644 index 000000000000..4bac84d8702b --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/internal/async/EmptySubscription.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.internal.async; + +import java.util.concurrent.atomic.AtomicBoolean; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * A NoOp implementation of {@link Subscription} interface. + * + * This subscription calls {@link Subscriber#onComplete()} on first request for data and then terminates the subscription. + */ +@SdkInternalApi +public final class EmptySubscription implements Subscription { + + private final AtomicBoolean isTerminated = new AtomicBoolean(false); + private final Subscriber subscriber; + + public EmptySubscription(Subscriber subscriber) { + this.subscriber = subscriber; + } + + @Override + public void request(long n) { + if (isTerminated()) { + return; + } + if (n <= 0) { + throw new IllegalArgumentException("Non-positive request signals are illegal"); + } + if (terminate()) { + subscriber.onComplete(); + } + } + + @Override + public void cancel() { + terminate(); + } + + private boolean terminate() { + return isTerminated.compareAndSet(false, true); + } + + private boolean isTerminated() { + return isTerminated.get(); + } +} diff --git a/utils/src/test/java/software/amazon/awssdk/testutils/EnvironmentVariableHelper.java b/utils/src/test/java/software/amazon/awssdk/testutils/EnvironmentVariableHelper.java new file mode 100644 index 000000000000..13f67785017c --- /dev/null +++ b/utils/src/test/java/software/amazon/awssdk/testutils/EnvironmentVariableHelper.java @@ -0,0 +1,146 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
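// Illustrative sketch (not part of this change) of how the EmptySubscription introduced above can
// back a publisher that has nothing to emit: the subscriber receives onComplete() on its first
// request and nothing else. EmptySubscription is an SDK-internal class; the EmptyStringPublisher
// type and the inline subscriber are made up for illustration only.
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import software.amazon.awssdk.utils.internal.async.EmptySubscription;

class EmptyStringPublisher implements Publisher<String> {
    @Override
    public void subscribe(Subscriber<? super String> subscriber) {
        // Hand the subscriber a subscription that completes it on the first request(n) call.
        subscriber.onSubscribe(new EmptySubscription(subscriber));
    }

    public static void main(String[] args) {
        new EmptyStringPublisher().subscribe(new Subscriber<String>() {
            @Override public void onSubscribe(Subscription s) { s.request(1); }
            @Override public void onNext(String s) { }
            @Override public void onError(Throwable t) { }
            @Override public void onComplete() { System.out.println("completed with no elements"); }
        });
    }
}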
+ */ + +package software.amazon.awssdk.testutils; + +import java.lang.reflect.Field; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Consumer; +import org.junit.rules.ExternalResource; +import software.amazon.awssdk.utils.SystemSetting; + +/** + * A utility that can temporarily forcibly set environment variables and + * then allows resetting them to the original values. + */ +public class EnvironmentVariableHelper extends ExternalResource { + + private final Map originalEnvironmentVariables; + private final Map modifiableMap; + private volatile boolean mutated = false; + + public EnvironmentVariableHelper() { + // CHECKSTYLE:OFF - This is a specific utility around system environment variables + originalEnvironmentVariables = new HashMap<>(System.getenv()); + modifiableMap = Optional.ofNullable(processEnv()).orElse(envMap()); + // CHECKSTYLE:ON + } + + public void remove(SystemSetting setting) { + remove(setting.environmentVariable()); + } + + public void remove(String key) { + mutated = true; + modifiableMap.remove(key); + } + + public void set(SystemSetting setting, String value) { + set(setting.environmentVariable(), value); + } + + public void set(String key, String value) { + mutated = true; + modifiableMap.put(key, value); + } + + public void reset() { + if (mutated) { + synchronized (this) { + if (mutated) { + modifiableMap.clear(); + modifiableMap.putAll(originalEnvironmentVariables); + mutated = false; + } + } + } + } + + @Override + protected void after() { + reset(); + } + + private PrivilegedExceptionAction setAccessible(Field f) { + return () -> { + f.setAccessible(true); + return null; + }; + } + + /** + * Static run method that allows for "single-use" environment variable modification. + * + * Example use: + *
+     * <pre>
+     * {@code
+     * EnvironmentVariableHelper.run(helper -> {
+     *    helper.set("variable", "value");
+     *    //run some test that uses "variable"
+     * });
+     * }
+     * </pre>
+     * 
+ * + * Will call {@link #reset} at the end of the block (even if the block exits exceptionally). + * + * @param helperConsumer a code block to run that gets an {@link EnvironmentVariableHelper} as an argument + */ + public static void run(Consumer helperConsumer) { + EnvironmentVariableHelper helper = new EnvironmentVariableHelper(); + try { + helperConsumer.accept(helper); + } finally { + helper.reset(); + } + } + + private Map envMap() { + // CHECKSTYLE:OFF - This is a specific utility around system environment variables + return getField(System.getenv().getClass(), System.getenv(), "m"); + // CHECKSTYLE:ON + } + + /** + * Windows is using a different process environment. + * + * See http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/windows/classes/java/lang/ProcessEnvironment.java#l235 + */ + private Map processEnv() { + Class processEnvironment; + try { + processEnvironment = Class.forName("java.lang.ProcessEnvironment"); + return getField(processEnvironment, null, "theCaseInsensitiveEnvironment"); + } catch (ClassNotFoundException e) { + return null; + } + } + + @SuppressWarnings("unchecked") + private Map getField(Class processEnvironment, Object obj, String fieldName) { + try { + Field declaredField = processEnvironment.getDeclaredField(fieldName); + AccessController.doPrivileged(setAccessible(declaredField)); + + return (Map) declaredField.get(obj); + } catch (IllegalAccessException | NoSuchFieldException | PrivilegedActionException e) { + return null; + } + } +} diff --git a/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java index 0d885deb410e..e514974efc90 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java @@ -19,7 +19,12 @@ import static java.util.Collections.singletonList; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.entry; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -173,4 +178,46 @@ public void headersFromCollectionWorksCorrectly() { assertThat(SdkHttpUtils.firstMatchingHeaderFromCollection(headers, asList("foo", "nothing"))).hasValue("bar"); assertThat(SdkHttpUtils.firstMatchingHeaderFromCollection(headers, asList("foo", "other"))).hasValue("foo"); } + + @Test + public void isSingleHeader() { + assertThat(SdkHttpUtils.isSingleHeader("age")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("authorization")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("content-length")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("content-location")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("content-md5")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("content-range")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("content-type")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("date")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("etag")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("expires")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("from")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("host")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("if-modified-since")).isTrue(); + 
assertThat(SdkHttpUtils.isSingleHeader("if-range")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("if-unmodified-since")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("last-modified")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("location")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("max-forwards")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("proxy-authorization")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("range")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("referer")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("retry-after")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("server")).isTrue(); + assertThat(SdkHttpUtils.isSingleHeader("user-agent")).isTrue(); + + assertThat(SdkHttpUtils.isSingleHeader("custom")).isFalse(); + } + + @Test + public void uriParams() throws URISyntaxException { + URI uri = URI.create("https://github.com/aws/aws-sdk-java-v2/issues/2034?reqParam=1234&oParam=3456&reqParam=5678&noval" + + "&decoded%26Part=equals%3Dval"); + Map> uriParams = SdkHttpUtils.uriParams(uri); + assertThat(uriParams).contains(entry("reqParam", Arrays.asList("1234", "5678")), + entry("oParam", Collections.singletonList("3456")), + entry("noval", Arrays.asList((String)null)), + entry("decoded&Part", Arrays.asList("equals=val"))); + } + } diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileLocationTest.java b/utils/src/test/java/software/amazon/awssdk/utils/UserHomeDirectoryUtilsTest.java similarity index 73% rename from core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileLocationTest.java rename to utils/src/test/java/software/amazon/awssdk/utils/UserHomeDirectoryUtilsTest.java index cf848bd64707..a77acc529bc0 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileLocationTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/UserHomeDirectoryUtilsTest.java @@ -13,9 +13,10 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.profiles; +package software.amazon.awssdk.utils; import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.utils.UserHomeDirectoryUtils.userHomeDirectory; import java.util.Arrays; import java.util.HashMap; @@ -24,13 +25,10 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; -import software.amazon.awssdk.profiles.ProfileFileLocation; import software.amazon.awssdk.testutils.EnvironmentVariableHelper; -/** - * Verify the functionality of {@link ProfileFileLocation}. - */ -public class ProfileFileLocationTest { +public class UserHomeDirectoryUtilsTest { + private final Map savedEnvironmentVariableValues = new HashMap<>(); private static final List SAVED_ENVIRONMENT_VARIABLES = Arrays.asList("HOME", @@ -46,11 +44,10 @@ public class ProfileFileLocationTest { */ @Before public void saveEnvironment() throws Exception { - // The tests in this file change the os.home for testing windows vs non-windows loading, and the static constructor for - // ProfileFileLocation currently loads the file system separator based on the os.home. We need to call the static - // constructor for ProfileFileLocation before changing the os.home so that it doesn't try to load the file system - // separator during the test. If we don't, it'll complain that it doesn't recognize the file system. - ProfileFileLocation.userHomeDirectory(); + // The tests in this file change the os.home for testing windows vs non-windows loading. 
We need to load the home + // directory that should be used for the stored file before changing the os.home so that it doesn't try to load + // the file system separator during the test. If we don't, it'll complain that it doesn't recognize the file system. + userHomeDirectory(); for (String variable : SAVED_ENVIRONMENT_VARIABLES) { savedEnvironmentVariableValues.put(variable, System.getenv(variable)); @@ -84,18 +81,18 @@ public void homeDirectoryResolutionPriorityIsCorrectOnWindows() throws Exception ENVIRONMENT_VARIABLE_HELPER.set("HOMEDRIVE", "homedrive"); ENVIRONMENT_VARIABLE_HELPER.set("HOMEPATH", "homepath"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo("home"); + assertThat(userHomeDirectory()).isEqualTo("home"); ENVIRONMENT_VARIABLE_HELPER.remove("HOME"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo("userprofile"); + assertThat(userHomeDirectory()).isEqualTo("userprofile"); ENVIRONMENT_VARIABLE_HELPER.remove("USERPROFILE"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo("homedrivehomepath"); + assertThat(userHomeDirectory()).isEqualTo("homedrivehomepath"); ENVIRONMENT_VARIABLE_HELPER.remove("HOMEDRIVE"); ENVIRONMENT_VARIABLE_HELPER.remove("HOMEPATH"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo(System.getProperty("user.home")); + assertThat(userHomeDirectory()).isEqualTo(System.getProperty("user.home")); } finally { System.setProperty("os.name", osName); } @@ -112,20 +109,20 @@ public void homeDirectoryResolutionPriorityIsCorrectOnNonWindows() throws Except ENVIRONMENT_VARIABLE_HELPER.set("HOMEDRIVE", "homedrive"); ENVIRONMENT_VARIABLE_HELPER.set("HOMEPATH", "homepath"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo("home"); + assertThat(userHomeDirectory()).isEqualTo("home"); ENVIRONMENT_VARIABLE_HELPER.remove("HOME"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo(System.getProperty("user.home")); + assertThat(userHomeDirectory()).isEqualTo(System.getProperty("user.home")); ENVIRONMENT_VARIABLE_HELPER.remove("USERPROFILE"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo(System.getProperty("user.home")); + assertThat(userHomeDirectory()).isEqualTo(System.getProperty("user.home")); ENVIRONMENT_VARIABLE_HELPER.remove("HOMEDRIVE"); ENVIRONMENT_VARIABLE_HELPER.remove("HOMEPATH"); - assertThat(ProfileFileLocation.userHomeDirectory()).isEqualTo(System.getProperty("user.home")); + assertThat(userHomeDirectory()).isEqualTo(System.getProperty("user.home")); } finally { System.setProperty("os.name", osName); } } -} +} \ No newline at end of file diff --git a/utils/src/test/java/software/amazon/awssdk/utils/internal/MappingSubscriberTest.java b/utils/src/test/java/software/amazon/awssdk/utils/internal/MappingSubscriberTest.java new file mode 100644 index 000000000000..b4f750352ce8 --- /dev/null +++ b/utils/src/test/java/software/amazon/awssdk/utils/internal/MappingSubscriberTest.java @@ -0,0 +1,98 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.internal; + +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +@RunWith(MockitoJUnitRunner.class) +public class MappingSubscriberTest { + @Mock + private Subscription mockSubscription; + + @Mock + private Subscriber mockSubscriber; + + @Test + public void verifyNormalFlow() { + MappingSubscriber mappingSubscriber = + MappingSubscriber.create(mockSubscriber, String::toUpperCase); + + mappingSubscriber.onSubscribe(mockSubscription); + verify(mockSubscriber).onSubscribe(mockSubscription); + verifyNoMoreInteractions(mockSubscriber); + + reset(mockSubscriber); + mappingSubscriber.onNext("one"); + verify(mockSubscriber).onNext("ONE"); + verifyNoMoreInteractions(mockSubscriber); + + reset(mockSubscriber); + mappingSubscriber.onNext("two"); + verify(mockSubscriber).onNext("TWO"); + verifyNoMoreInteractions(mockSubscriber); + + reset(mockSubscriber); + mappingSubscriber.onComplete(); + verify(mockSubscriber).onComplete(); + verifyNoMoreInteractions(mockSubscriber); + } + + @Test + public void verifyMappingExceptionFlow() { + RuntimeException exception = new IllegalArgumentException("Twos are not supported"); + + MappingSubscriber mappingSubscriber = + MappingSubscriber.create(mockSubscriber, s -> { + if ("two".equals(s)) { + throw exception; + } + + return s.toUpperCase(); + }); + + mappingSubscriber.onSubscribe(mockSubscription); + verify(mockSubscriber).onSubscribe(mockSubscription); + verifyNoMoreInteractions(mockSubscriber); + + reset(mockSubscriber); + mappingSubscriber.onNext("one"); + verify(mockSubscriber).onNext("ONE"); + verifyNoMoreInteractions(mockSubscriber); + + reset(mockSubscriber); + mappingSubscriber.onNext("two"); + verify(mockSubscriber).onError(exception); + verifyNoMoreInteractions(mockSubscriber); + verify(mockSubscription).cancel(); + + reset(mockSubscriber); + mappingSubscriber.onNext("three"); + verifyNoMoreInteractions(mockSubscriber); + + reset(mockSubscriber); + mappingSubscriber.onComplete(); + verifyNoMoreInteractions(mockSubscriber); + } +} \ No newline at end of file
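// Hand-driven sketch (not part of this change, mirroring the mock-based test above) of how
// MappingSubscriber behaves with a plain downstream subscriber: mapped values flow through, and a
// mapping failure cancels the upstream subscription and surfaces downstream as onError. The inline
// subscriber, subscription, and mapper are made up for illustration only.
import java.util.function.Function;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import software.amazon.awssdk.utils.internal.MappingSubscriber;

class MappingSubscriberSketch {
    public static void main(String[] args) {
        Subscriber<Integer> downstream = new Subscriber<Integer>() {
            @Override public void onSubscribe(Subscription s) { s.request(Long.MAX_VALUE); }
            @Override public void onNext(Integer length) { System.out.println("length = " + length); }
            @Override public void onError(Throwable t) { System.out.println("error: " + t.getMessage()); }
            @Override public void onComplete() { System.out.println("done"); }
        };

        Function<String, Integer> mapper = s -> {
            if (s.isEmpty()) {
                throw new IllegalArgumentException("empty string not supported");
            }
            return s.length();
        };

        MappingSubscriber<String, Integer> mapping = MappingSubscriber.create(downstream, mapper);

        mapping.onSubscribe(new Subscription() {
            @Override public void request(long n) { /* no-op for this sketch */ }
            @Override public void cancel() { System.out.println("upstream cancelled"); }
        });

        mapping.onNext("abc"); // downstream prints "length = 3"
        mapping.onNext("");    // mapper throws: prints "upstream cancelled" then the error
        mapping.onNext("xyz"); // ignored, since the mapping subscriber is already cancelled
    }
}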