|
227 | 227 | {"shape":"StorageVirtualMachineNotFound"},
|
228 | 228 | {"shape":"UnsupportedOperation"}
|
229 | 229 | ],
|
230 |
| - "documentation":"<p>Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage volume.</p>" |
| 230 | + "documentation":"<p>Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.</p>" |
231 | 231 | },
|
232 | 232 | "CreateVolumeFromBackup":{
|
233 | 233 | "name":"CreateVolumeFromBackup",
|
|
1195 | 1195 | "FileSystemId":{"shape":"FileSystemId"},
|
1196 | 1196 | "FileSystemPath":{
|
1197 | 1197 | "shape":"Namespace",
|
1198 |
| - "documentation":"<p>A path on the file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p> <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p>" |
| 1198 | + "documentation":"<p>A path on the file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p> <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p> <note> <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only 1 data repository to the file system. You can only specify <code>/</code> as the file system path for the first data repository associated with a file system.</p> </note>" |
1199 | 1199 | },
|
1200 | 1200 | "DataRepositoryPath":{
|
1201 | 1201 | "shape":"ArchivePath",
|
|
1347 | 1347 | },
|
1348 | 1348 | "AutoImportPolicy":{
|
1349 | 1349 | "shape":"AutoImportPolicyType",
|
1350 |
| - "documentation":"<p> (Optional) Available with <code>Scratch</code> and <code>Persistent_1</code> deployment types. When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. <code>AutoImportPolicy</code> can have the following values:</p> <ul> <li> <p> <code>NONE</code> - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.</p> </li> <li> <p> <code>NEW</code> - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. </p> </li> <li> <p> <code>NEW_CHANGED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.</p> </li> <li> <p> <code>NEW_CHANGED_DELETED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.</p> </li> </ul> <p>For more information, see <a href=\"https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html\"> Automatically import updates from your S3 bucket</a>.</p> <note> <p>This parameter is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>CreateDataRepositoryAssociation\"</code> to create a data repository association to link your Lustre file system to a data repository.</p> </note>" |
| 1350 | + "documentation":"<p> (Optional) Available with <code>Scratch</code> and <code>Persistent_1</code> deployment types. When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. <code>AutoImportPolicy</code> can have the following values:</p> <ul> <li> <p> <code>NONE</code> - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.</p> </li> <li> <p> <code>NEW</code> - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. </p> </li> <li> <p> <code>NEW_CHANGED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.</p> </li> <li> <p> <code>NEW_CHANGED_DELETED</code> - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.</p> </li> </ul> <p>For more information, see <a href=\"https://docs.aws.amazon.com/fsx/latest/LustreGuide/older-deployment-types.html#legacy-auto-import-from-s3\"> Automatically import updates from your S3 bucket</a>.</p> <note> <p>This parameter is not supported for file systems with the <code>Persistent_2</code> deployment type. Instead, use <code>CreateDataRepositoryAssociation</code> to create a data repository association to link your Lustre file system to a data repository.</p> </note>" |
1351 | 1351 | },
|
1352 | 1352 | "PerUnitStorageThroughput":{
|
1353 | 1353 | "shape":"PerUnitStorageThroughput",
|
|
1389 | 1389 | },
|
1390 | 1390 | "EndpointIpAddressRange":{
|
1391 | 1391 | "shape":"IpAddressRange",
|
1392 |
| - "documentation":"<p>Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range.</p>" |
| 1392 | + "documentation":"<p>Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range.</p> <important> <p>The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger.</p> </important>" |
1393 | 1393 | },
|
1394 | 1394 | "FsxAdminPassword":{
|
1395 | 1395 | "shape":"AdminPassword",
|
|
1434 | 1434 | "DailyAutomaticBackupStartTime":{"shape":"DailyTime"},
|
1435 | 1435 | "DeploymentType":{
|
1436 | 1436 | "shape":"OpenZFSDeploymentType",
|
1437 |
| - "documentation":"<p>Specifies the file system deployment type. Amazon FSx for OpenZFS supports <code>SINGLE_AZ_1</code>. <code>SINGLE_AZ_1</code> is a file system configured for a single Availability Zone (AZ) of redundancy.</p>" |
| 1437 | + "documentation":"<p>Specifies the file system deployment type. Amazon FSx for OpenZFS supports <code>SINGLE_AZ_1</code>. The <code>SINGLE_AZ_1</code> deployment type is configured for redundancy within a single Availability Zone.</p>" |
1438 | 1438 | },
|
1439 | 1439 | "ThroughputCapacity":{
|
1440 | 1440 | "shape":"MegabytesPerSecond",
|
|
1616 | 1616 | "members":{
|
1617 | 1617 | "ParentVolumeId":{
|
1618 | 1618 | "shape":"VolumeId",
|
1619 |
| - "documentation":"<p>The ID of the volume to use as the parent volume.</p>" |
| 1619 | + "documentation":"<p>The ID of the volume to use as the parent volume of the volume that you are creating.</p>" |
1620 | 1620 | },
|
1621 | 1621 | "StorageCapacityReservationGiB":{
|
1622 | 1622 | "shape":"IntegerNoMaxFromNegativeOne",
|
1623 |
| - "documentation":"<p>The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved. To not specify a storage capacity reservation, set this to <code>-1</code>.</p>" |
| 1623 | + "documentation":"<p>Specifies the amount of storage in gibibytes (GiB) to reserve from the parent volume. Setting <code>StorageCapacityReservationGiB</code> guarantees that the specified amount of storage space on the parent volume will always be available for the volume. You can't reserve more storage than the parent volume has. To <i>not</i> specify a storage capacity reservation, set this to <code>0</code> or <code>-1</code>. For more information, see <a href=\"https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/managing-volumes.html#volume-properties\">Volume properties</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>" |
1624 | 1624 | },
|
1625 | 1625 | "StorageCapacityQuotaGiB":{
|
1626 | 1626 | "shape":"IntegerNoMaxFromNegativeOne",
|
1627 |
| - "documentation":"<p>The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can't specify a quota larger than the storage on the parent volume. To not specify a storage capacity quota, set this to <code>-1</code>. </p>" |
| 1627 | + "documentation":"<p>Sets the maximum storage size in gibibytes (GiB) for the volume. You can specify a quota that is larger than the storage on the parent volume. A volume quota limits the amount of storage that the volume can consume to the configured amount, but does not guarantee the space will be available on the parent volume. To guarantee quota space, you must also set <code>StorageCapacityReservationGiB</code>. To <i>not</i> specify a storage capacity quota, set this to <code>-1</code>. </p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/managing-volumes.html#volume-properties\">Volume properties</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>" |
1628 | 1628 | },
|
1629 | 1629 | "RecordSizeKiB":{
|
1630 | 1630 | "shape":"IntegerRecordSizeKiB",
|
1631 |
| - "documentation":"<p>Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For additional guidance on when to set a custom record size, see <a href=\"https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs\"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>" |
| 1631 | + "documentation":"<p>Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see <a href=\"https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance\"> ZFS Record size</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>" |
1632 | 1632 | },
|
1633 | 1633 | "DataCompressionType":{
|
1634 | 1634 | "shape":"OpenZFSDataCompressionType",
|
1635 |
| - "documentation":"<p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p> <ul> <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li> <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</p> </li> <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</p> </li> </ul>" |
| 1635 | + "documentation":"<p>Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by default.</p> <ul> <li> <p> <code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</p> </li> <li> <p> <code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. ZSTD compression provides a higher level of data compression and higher read throughput performance than LZ4 compression.</p> </li> <li> <p> <code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. LZ4 compression provides a lower level of compression and higher write throughput performance than ZSTD compression.</p> </li> </ul> <p>For more information about volume compression types and the performance of your Amazon FSx for OpenZFS file system, see <a href=\"https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#performance-tips-zfs\"> Tips for maximizing performance</a> in the <i>Amazon FSx for OpenZFS User Guide</i>.</p>" |
1636 | 1636 | },
|
1637 | 1637 | "CopyTagsToSnapshots":{
|
1638 | 1638 | "shape":"Flag",
|
|
1852 | 1852 | "FailureDetails":{"shape":"DataRepositoryFailureDetails"},
|
1853 | 1853 | "FileSystemPath":{
|
1854 | 1854 | "shape":"Namespace",
|
1855 |
| - "documentation":"<p>A path on the file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p> <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. </p>" |
| 1855 | + "documentation":"<p>A path on the file system that points to a high-level directory (such as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>) that will be mapped 1-1 with <code>DataRepositoryPath</code>. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path <code>/ns1/</code>, then you cannot link another data repository with file system path <code>/ns1/ns2</code>.</p> <p>This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p> <note> <p>If you specify only a forward slash (<code>/</code>) as the file system path, you can link only 1 data repository to the file system. You can only specify <code>/</code> as the file system path for the first data repository associated with a file system.</p> </note>" |
1856 | 1856 | },
|
1857 | 1857 | "DataRepositoryPath":{
|
1858 | 1858 | "shape":"ArchivePath",
|
|
3373 | 3373 | "type":"string",
|
3374 | 3374 | "max":4096,
|
3375 | 3375 | "min":1,
|
3376 |
| - "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4096}$" |
| 3376 | + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,4096}$" |
3377 | 3377 | },
|
3378 | 3378 | "NetBiosAlias":{
|
3379 | 3379 | "type":"string",
|
|
3435 | 3435 | },
|
3436 | 3436 | "EndpointIpAddressRange":{
|
3437 | 3437 | "shape":"IpAddressRange",
|
3438 |
| - "documentation":"<p>The IP address range in which the endpoints to access your file system are created.</p>" |
| 3438 | + "documentation":"<p>The IP address range in which the endpoints to access your file system are created.</p> <important> <p>The Endpoint IP address range you select for your file system must exist outside the VPC's CIDR range and must be at least /30 or larger. If you do not specify this optional parameter, Amazon FSx will automatically select a CIDR block for you.</p> </important>" |
3439 | 3439 | },
|
3440 | 3440 | "Endpoints":{
|
3441 | 3441 | "shape":"FileSystemEndpoints",
|
|
3626 | 3626 | "documentation":"<p>A list of configuration objects that contain the client and options for mounting the OpenZFS file system. </p>"
|
3627 | 3627 | }
|
3628 | 3628 | },
|
3629 |
| - "documentation":"<p>The Network File System NFS) configurations for mounting an Amazon FSx for OpenZFS file system. </p>" |
| 3629 | + "documentation":"<p>The Network File System (NFS) configurations for mounting an Amazon FSx for OpenZFS file system. </p>" |
3630 | 3630 | },
|
3631 | 3631 | "OpenZFSNfsExportOption":{
|
3632 | 3632 | "type":"string",
|
|
4299 | 4299 | "members":{
|
4300 | 4300 | "Message":{"shape":"ErrorMessage"}
|
4301 | 4301 | },
|
4302 |
| - "documentation":"<p>No Amazon FSx for NetApp ONTAP SVMs were found based upon the supplied parameters.</p>", |
| 4302 | + "documentation":"<p>No FSx for ONTAP SVMs were found based upon the supplied parameters.</p>", |
4303 | 4303 | "exception":true
|
4304 | 4304 | },
|
4305 | 4305 | "StorageVirtualMachineRootVolumeSecurityStyle":{
|
|
0 commit comments